GB_sort_template.c
//------------------------------------------------------------------------------ // GB_sort_template: sort all vectors in a matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // macros: // GB_SORT (func) defined as GB_sort_func_TYPE_ascend or _descend, // GB_msort_ISO_ascend or _descend, // or GB_msort_func_UDT // GB_TYPE bool, int8_, ... or GB_void for UDT or ISO // GB_ADDR(A,p) A+p for builtin, A + p * GB_SIZE otherwise // GB_SIZE size of each entry: sizeof (GB_TYPE) for built-in // GB_GET(x,X,i) x = X [i] for built-in, memcpy for UDT // GB_COPY(A,i,C,k) A[i] = C [k] // GB_SWAP(A,i,k) swap A[i] and A[k] // GB_LT compare two entries, x < y, or x > y for descending sort //------------------------------------------------------------------------------ // GB_SORT (partition): use a pivot to partition an array //------------------------------------------------------------------------------ // C.A.R Hoare partition method, partitions an array in-place via a pivot. // k = partition (A, n) partitions A [0:n-1] such that all entries in // A [0:k] are <= all entries in A [k+1:n-1]. static inline int64_t GB_SORT (partition) ( GB_TYPE *restrict A_0, // size n arrays to partition int64_t *restrict A_1, // size n array const int64_t n, // size of the array(s) to partition uint64_t *seed // random number seed, modified on output #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { // select a pivot at random int64_t pivot = ((n < GB_RAND_MAX) ? GB_rand15 (seed) : GB_rand (seed)) % n; // Pivot = A [pivot] GB_GET (Pivot0, A_0, pivot) ; // Pivot0 = A_0 [pivot] int64_t Pivot1 = A_1 [pivot] ; // At the top of the while loop, A [left+1...right-1] is considered, and // entries outside this range are in their proper place and not touched. // Since the input specification of this function is to partition A // [0..n-1], left must start at -1 and right must start at n. int64_t left = -1 ; int64_t right = n ; // keep partitioning until the left and right sides meet while (true) { // loop invariant: A [0..left] < pivot and A [right..n-1] > Pivot, // so the region to be considered is A [left+1 ... right-1]. // increment left until finding an entry A [left] >= Pivot bool less ; do { left++ ; // a0 = A_0 [left] GB_GET (a0, A_0, left) ; // less = (a0, A_1 [left]) < (Pivot0, Pivot1) GB_LT (less, a0, A_1 [left], Pivot0, Pivot1) ; } while (less) ; // decrement right until finding an entry A [right] <= Pivot do { right-- ; // a0 = A_0 [right] GB_GET (a1, A_0, right) ; // less = (Pivot0, Pivot1) < (a1, A_1 [right]) GB_LT (less, Pivot0, Pivot1, a1, A_1 [right]) ; } while (less) ; // now A [0..left-1] < pivot and A [right+1..n-1] > pivot, but // A [left] > pivot and A [right] < pivot, so these two entries // are out of place and must be swapped. // However, if the two sides have met, the partition is finished. if (left >= right) { // A has been partitioned into A [0:right] and A [right+1:n-1]. // k = right+1, so A is split into A [0:k-1] and A [k:n-1]. 
return (right + 1) ; } // since A [left] > pivot and A [right] < pivot, swap them GB_SWAP (A_0, left, right) ; int64_t t1 = A_1 [left] ; A_1 [left] = A_1 [right] ; A_1 [right] = t1 ; // after the swap this condition holds: // A [0..left] < pivot and A [right..n-1] > pivot } } //------------------------------------------------------------------------------ // GB_SORT (quicksort): recursive single-threaded quicksort //------------------------------------------------------------------------------ static void GB_SORT (quicksort) // sort A [0:n-1] ( GB_TYPE *restrict A_0, // size n arrays to sort int64_t *restrict A_1, // size n array const int64_t n, // size of the array(s) to sort uint64_t *seed // random number seed #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { if (n < 20) { // in-place insertion sort on A [0:n-1], where n is small for (int64_t k = 1 ; k < n ; k++) { for (int64_t j = k ; j > 0 ; j--) { // a0 = A_0 [j] GB_GET (a0, A_0, j) ; // a1 = A_0 [j-1] GB_GET (a1, A_0, j-1) ; // break if A [j] >= A [j-1] bool less ; // less = (a0, A_1 [j]) < (a1, A_1 [j-1]) GB_LT (less, a0, A_1 [j], a1, A_1 [j-1]) ; if (!less) break ; // swap A [j-1] and A [j] GB_SWAP (A_0, j-1, j) ; int64_t t1 = A_1 [j-1] ; A_1 [j-1] = A_1 [j] ; A_1 [j] = t1 ; } } } else { // partition A [0:n-1] into A [0:k-1] and A [k:n-1] int64_t k = GB_SORT (partition) (A_0, A_1, n, seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; // sort each partition // sort A [0:k-1] GB_SORT (quicksort) (A_0, A_1, k, seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; // sort A [k:n-1] GB_SORT (quicksort) (GB_ADDR (A_0, k), A_1 + k, n-k, seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } } //------------------------------------------------------------------------------ // GB_SORT (binary_search): binary search for the pivot //------------------------------------------------------------------------------ // The Pivot value is Y [pivot], and a binary search for the Pivot is made in // the array X [p_pstart...p_end-1], which is sorted in non-decreasing order on // input. The return value is pleft, where // // X [p_start ... pleft-1] <= Pivot and // X [pleft ... p_end-1] >= Pivot holds. // // pleft is returned in the range p_start to p_end. If pleft is p_start, then // the Pivot is smaller than all entries in X [p_start...p_end-1], and the left // list X [p_start...pleft-1] is empty. If pleft is p_end, then the Pivot is // larger than all entries in X [p_start...p_end-1], and the right list X // [pleft...p_end-1] is empty. 
static int64_t GB_SORT (binary_search) // return pleft ( const GB_TYPE *restrict Y_0, // Pivot is Y [pivot] const int64_t *restrict Y_1, const int64_t pivot, const GB_TYPE *restrict X_0, // search in X [p_start..p_end_-1] const int64_t *restrict X_1, const int64_t p_start, const int64_t p_end #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { //-------------------------------------------------------------------------- // find where the Pivot appears in X //-------------------------------------------------------------------------- // binary search of X [p_start...p_end-1] for the Pivot int64_t pleft = p_start ; int64_t pright = p_end - 1 ; GB_GET (Pivot0, Y_0, pivot) ; // Pivot0 = Y_0 [pivot] int64_t Pivot1 = Y_1 [pivot] ; bool less ; while (pleft < pright) { int64_t pmiddle = (pleft + pright) >> 1 ; // x0 = X_0 [pmiddle] GB_GET (x0, X_0, pmiddle) ; // less = (x0, X_1 [pmiddle]) < (Pivot0, Pivot1) GB_LT (less, x0, X_1 [pmiddle], Pivot0, Pivot1) ; pleft = less ? (pmiddle+1) : pleft ; pright = less ? pright : pmiddle ; } // binary search is narrowed down to a single item // or it has found the list is empty: ASSERT (pleft == pright || pleft == pright + 1) ; // If found is true then X [pleft == pright] == Pivot. If duplicates // appear then X [pleft] is any one of the entries equal to the Pivot // in the list. If found is false then // X [p_start ... pleft-1] < Pivot and // X [pleft+1 ... p_end-1] > Pivot holds. // The value X [pleft] may be either < or > Pivot. bool found = (pleft == pright) && (X_1 [pleft] == Pivot1) ; // Modify pleft and pright: if (!found && (pleft == pright)) { // x0 = X_0 [pleft] GB_GET (x0, X_0, pleft) ; // less = (x0, X_1 [pleft]) < (Pivot0, Pivot1) GB_LT (less, x0, X_1 [pleft], Pivot0, Pivot1) ; if (less) { pleft++ ; } else { // pright++ ; // (not needed) } } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- // If found is false then // X [p_start ... pleft-1] < Pivot and // X [pleft ... p_end-1] > Pivot holds, // and pleft-1 == pright // If X has no duplicates, then whether or not Pivot is found, // X [p_start ... pleft-1] < Pivot and // X [pleft ... p_end-1] >= Pivot holds. // If X has duplicates, then whether or not Pivot is found, // X [p_start ... pleft-1] <= Pivot and // X [pleft ... p_end-1] >= Pivot holds. return (pleft) ; } //------------------------------------------------------------------------------ // GB_SORT (create_merge_tasks) //------------------------------------------------------------------------------ // Recursively constructs ntasks tasks to merge two arrays, Left and Right, // into Sresult, where Left is L [pL_start...pL_end-1], Right is R // [pR_start...pR_end-1], and Sresult is S [pS_start...pS_start+total_work-1], // and where total_work is the total size of Left and Right. // // Task tid will merge L [L_task [tid] ... L_task [tid] + L_len [tid] - 1] and // R [R_task [tid] ... R_task [tid] + R_len [tid] -1] into the merged output // array S [S_task [tid] ... ]. The task tids created are t0 to // t0+ntasks-1. 
static void GB_SORT (create_merge_tasks) ( // output: int64_t *restrict L_task, // L_task [t0...t0+ntasks-1] computed int64_t *restrict L_len, // L_len [t0...t0+ntasks-1] computed int64_t *restrict R_task, // R_task [t0...t0+ntasks-1] computed int64_t *restrict R_len, // R_len [t0...t0+ntasks-1] computed int64_t *restrict S_task, // S_task [t0...t0+ntasks-1] computed // input: const int t0, // first task tid to create const int ntasks, // # of tasks to create const int64_t pS_start, // merge into S [pS_start...] const GB_TYPE *restrict L_0, // Left = L [pL_start...pL_end-1] const int64_t *restrict L_1, const int64_t pL_start, const int64_t pL_end, const GB_TYPE *restrict R_0, // Right = R [pR_start...pR_end-1] const int64_t *restrict R_1, const int64_t pR_start, const int64_t pR_end #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { //-------------------------------------------------------------------------- // get problem size //-------------------------------------------------------------------------- int64_t nleft = pL_end - pL_start ; // size of Left array int64_t nright = pR_end - pR_start ; // size of Right array int64_t total_work = nleft + nright ; // total work to do ASSERT (ntasks >= 1) ; ASSERT (total_work > 0) ; //-------------------------------------------------------------------------- // create the tasks //-------------------------------------------------------------------------- if (ntasks == 1) { //---------------------------------------------------------------------- // a single task will merge all of Left and Right into Sresult //---------------------------------------------------------------------- L_task [t0] = pL_start ; L_len [t0] = nleft ; R_task [t0] = pR_start ; R_len [t0] = nright ; S_task [t0] = pS_start ; } else { //---------------------------------------------------------------------- // partition the Left and Right arrays for multiple merge tasks //---------------------------------------------------------------------- int64_t pleft, pright ; if (nleft >= nright) { // split Left in half, and search for its pivot in Right pleft = (pL_end + pL_start) >> 1 ; pright = GB_SORT (binary_search) ( L_0, L_1, pleft, R_0, R_1, pR_start, pR_end #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } else { // split Right in half, and search for its pivot in Left pright = (pR_end + pR_start) >> 1 ; pleft = GB_SORT (binary_search) ( R_0, R_1, pright, L_0, L_1, pL_start, pL_end #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } //---------------------------------------------------------------------- // partition the tasks according to the work of each partition //---------------------------------------------------------------------- // work0 is the total work in the first partition int64_t work0 = (pleft - pL_start) + (pright - pR_start) ; int ntasks0 = (int) round ((double) ntasks * (((double) work0) / ((double) total_work))) ; // ensure at least one task is assigned to each partition ntasks0 = GB_IMAX (ntasks0, 1) ; ntasks0 = GB_IMIN (ntasks0, ntasks-1) ; int ntasks1 = ntasks - ntasks0 ; //---------------------------------------------------------------------- // assign ntasks0 to the first half //---------------------------------------------------------------------- // ntasks0 tasks merge L [pL_start...pleft-1] and R [pR_start..pright-1] // into the result S [pS_start...work0-1]. 
GB_SORT (create_merge_tasks) ( L_task, L_len, R_task, R_len, S_task, t0, ntasks0, pS_start, L_0, L_1, pL_start, pleft, R_0, R_1, pR_start, pright #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; //---------------------------------------------------------------------- // assign ntasks1 to the second half //---------------------------------------------------------------------- // ntasks1 tasks merge L [pleft...pL_end-1] and R [pright...pR_end-1] // into the result S [pS_start+work0...pS_start+total_work]. int t1 = t0 + ntasks0 ; // first task id of the second set of tasks int64_t pS_start1 = pS_start + work0 ; // 2nd set starts here in S GB_SORT (create_merge_tasks) ( L_task, L_len, R_task, R_len, S_task, t1, ntasks1, pS_start1, L_0, L_1, pleft, pL_end, R_0, R_1, pright, pR_end #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } } //------------------------------------------------------------------------------ // GB_SORT (merge): merge two sorted lists via a single thread //------------------------------------------------------------------------------ // merge Left [0..nleft-1] and Right [0..nright-1] into S [0..nleft+nright-1] */ static void GB_SORT (merge) ( GB_TYPE *restrict S_0, // output of length nleft + nright int64_t *restrict S_1, const GB_TYPE *restrict Left_0, // left input of length nleft const int64_t *restrict Left_1, const int64_t nleft, const GB_TYPE *restrict Right_0, // right input of length nright const int64_t *restrict Right_1, const int64_t nright #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { int64_t p, pleft, pright ; // merge the two inputs, Left and Right, while both inputs exist for (p = 0, pleft = 0, pright = 0 ; pleft < nleft && pright < nright ; p++) { // left0 = Left_0 [pleft] GB_GET (left0, Left_0, pleft) ; // right0 = Right_0 [pright] GB_GET (right0, Right_0, pright) ; bool less ; // less = (left0, Left_1 [pleft]) < (right0, Right_1 [pright]) GB_LT (less, left0, Left_1 [pleft], right0, Right_1 [pright]) ; if (less) { // S [p] = Left [pleft++] GB_COPY (S_0, p, Left_0, pleft) ; S_1 [p] = Left_1 [pleft] ; pleft++ ; } else { // S [p] = Right [pright++] GB_COPY (S_0, p, Right_0, pright) ; S_1 [p] = Right_1 [pright] ; pright++ ; } } // either input is exhausted; copy the remaining list into S if (pleft < nleft) { int64_t nremaining = (nleft - pleft) ; memcpy (GB_ADDR (S_0, p), GB_ADDR (Left_0, pleft), nremaining * GB_SIZE) ; memcpy (S_1 + p, Left_1 + pleft, nremaining * sizeof (int64_t)) ; } else if (pright < nright) { int64_t nremaining = (nright - pright) ; memcpy (GB_ADDR (S_0, p), GB_ADDR (Right_0, pright), nremaining * GB_SIZE) ; memcpy (S_1 + p, Right_1 + pright, nremaining * sizeof (int64_t)) ; } } //------------------------------------------------------------------------------ // GB_SORT (vector) parallel mergesort of a single vector //------------------------------------------------------------------------------ static void GB_SORT (vector) // sort the pair of arrays A_0, A_1 ( GB_TYPE *restrict A_0, // size n array int64_t *restrict A_1, // size n array GB_TYPE *restrict W_0, // workspace of size n * GB_SIZE bytes int64_t *restrict W, // int64_t workspace of size n+6*ntasks+1 const int64_t n, const int kk, const int ntasks, const int nthreads // # of threads to use #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , 
GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { //-------------------------------------------------------------------------- // split up workspace //-------------------------------------------------------------------------- ASSERT (nthreads > 2 && n >= GB_BASECASE) ; int64_t *T = W ; int64_t *restrict W_1 = T ; T += n ; int64_t *restrict L_task = T ; T += ntasks ; int64_t *restrict L_len = T ; T += ntasks ; int64_t *restrict R_task = T ; T += ntasks ; int64_t *restrict R_len = T ; T += ntasks ; int64_t *restrict S_task = T ; T += ntasks ; int64_t *restrict Slice = T ; T += (ntasks+1) ; //-------------------------------------------------------------------------- // partition and sort the leaves //-------------------------------------------------------------------------- GB_eslice (Slice, n, ntasks) ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { int64_t leaf = Slice [tid] ; int64_t leafsize = Slice [tid+1] - leaf ; uint64_t seed = tid ; GB_SORT (quicksort) (GB_ADDR (A_0, leaf), A_1 + leaf, leafsize, &seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } //-------------------------------------------------------------------------- // merge each level //-------------------------------------------------------------------------- int nt = 1 ; for (int k = kk ; k >= 2 ; k -= 2) { //---------------------------------------------------------------------- // merge level k into level k-1, from A into W //---------------------------------------------------------------------- // TODO: skip k and k-1 for each group of 4 sublists of A if they are // already sorted with respect to each other. // this could be done in parallel if ntasks was large for (tid = 0 ; tid < ntasks ; tid += 2*nt) { // create 2*nt tasks to merge two A sublists into one W sublist GB_SORT (create_merge_tasks) ( L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid], A_0, A_1, Slice [tid], Slice [tid+nt], A_0, A_1, Slice [tid+nt], Slice [tid+2*nt] #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { // merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..] int64_t pL = L_task [tid], nL = L_len [tid] ; int64_t pR = R_task [tid], nR = R_len [tid] ; int64_t pS = S_task [tid] ; GB_SORT (merge) ( GB_ADDR (W_0, pS), W_1 + pS, GB_ADDR (A_0, pL), A_1 + pL, nL, GB_ADDR (A_0, pR), A_1 + pR, nR #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } nt = 2*nt ; //---------------------------------------------------------------------- // merge level k-1 into level k-2, from W into A //---------------------------------------------------------------------- // this could be done in parallel if ntasks was large for (tid = 0 ; tid < ntasks ; tid += 2*nt) { // create 2*nt tasks to merge two W sublists into one A sublist GB_SORT (create_merge_tasks) ( L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid], W_0, W_1, Slice [tid], Slice [tid+nt], W_0, W_1, Slice [tid+nt], Slice [tid+2*nt] #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { // merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..] 
int64_t pL = L_task [tid], nL = L_len [tid] ; int64_t pR = R_task [tid], nR = R_len [tid] ; int64_t pS = S_task [tid] ; GB_SORT (merge) ( GB_ADDR (A_0, pS), A_1 + pS, GB_ADDR (W_0, pL), W_1 + pL, nL, GB_ADDR (W_0, pR), W_1 + pR, nR #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } nt = 2*nt ; } } //------------------------------------------------------------------------------ // sort all vectors in a matrix //------------------------------------------------------------------------------ #undef GB_FREE_WORKSPACE #define GB_FREE_WORKSPACE \ { \ GB_WERK_POP (Werk, int64_t) ; \ GB_FREE_WORK (&C_skipped, C_skipped_size) ; \ GB_FREE_WORK (&W_0, W_0_size) ; \ GB_FREE_WORK (&W, W_size) ; \ } static GrB_Info GB_SORT (matrix) ( GrB_Matrix C, // matrix sorted in-place #if GB_SORT_UDT GrB_BinaryOp op, // comparator for user-defined types only #endif GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT_MATRIX_OK (C, "C to sort", GB0) ; ASSERT (GB_JUMBLED_OK (C)) ; ASSERT (GB_IS_SPARSE (C) || GB_IS_HYPERSPARSE (C)) ; #if GB_SORT_UDT ASSERT_BINARYOP_OK (op, "op", GB0) ; ASSERT (op->ztype == GrB_BOOL) ; ASSERT (op->xtype == op->ytype) ; #endif int64_t cnz = GB_nnz (C) ; if (C->iso || cnz <= 1) { // nothing to do return (GrB_SUCCESS) ; } //-------------------------------------------------------------------------- // get input //-------------------------------------------------------------------------- int64_t cnvec = C->nvec ; int64_t *restrict Cp = C->p ; int64_t *restrict Ci = C->i ; GB_TYPE *restrict Cx = (GB_TYPE *) C->x ; // workspace GB_TYPE *restrict W_0 = NULL ; size_t W_0_size = 0 ; int64_t *restrict W = NULL ; size_t W_size = 0 ; int64_t *restrict C_skipped = NULL ; size_t C_skipped_size = 0 ; GB_WERK_DECLARE (Werk, int64_t) ; #if GB_SORT_UDT // get typesize, and function pointers for operators and typecasting GrB_Type ctype = C->type ; size_t csize = ctype->size ; size_t xsize = op->xtype->size ; GxB_binary_function flt = op->binop_function ; GB_cast_function fcast = GB_cast_factory (op->xtype->code, ctype->code) ; #endif //========================================================================== // phase1: sort all short vectors //========================================================================== // slice the C matrix into tasks for phase 1 GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (cnz, chunk, nthreads_max) ; int ntasks = (nthreads == 1) ? 
1 : (32 * nthreads) ; ntasks = GB_IMIN (ntasks, cnvec) ; ntasks = GB_IMAX (ntasks, 1) ; // printf ("phase1: threads %d tasks %d\n", nthreads, ntasks) ; GB_WERK_PUSH (Werk, 3*ntasks + 2, int64_t) ; if (Werk == NULL) { // out of memory return (GrB_OUT_OF_MEMORY) ; } int64_t *restrict C_max = Werk ; // size ntasks int64_t *restrict C_skip = Werk + ntasks ; // size ntasks+1 int64_t *restrict C_slice = Werk + 2*ntasks + 1; // size ntasks+1 GB_pslice (C_slice, Cp, cnvec, ntasks, false) ; // sort all short vectors in parallel, one thread per vector int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { const int64_t kfirst = C_slice [tid] ; const int64_t klast = C_slice [tid+1] ; int64_t task_max_length = 0 ; int64_t n_skipped = 0 ; for (int64_t k = kfirst ; k < klast ; k++) { // sort the vector C(:,k), unless it is too long const int64_t pC_start = Cp [k] ; const int64_t pC_end = Cp [k+1] ; const int64_t cknz = pC_end - pC_start ; if (cknz <= GB_BASECASE || nthreads == 1) { // printf ("\n------------sort: %ld cknz %ld\n", k, cknz) ; uint64_t seed = k ; GB_SORT (quicksort) (GB_ADDR (Cx, pC_start), Ci + pC_start, cknz, &seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } else { // printf ("\n------------skip: %ld cknz %ld\n", k, cknz) ; n_skipped++ ; } task_max_length = GB_IMAX (task_max_length, cknz) ; } C_max [tid] = task_max_length ; C_skip [tid] = n_skipped ; } // find max vector length and return if all vectors are now sorted int64_t max_length = 0 ; for (tid = 0 ; tid < ntasks ; tid++) { max_length = GB_IMAX (max_length, C_max [tid]) ; } if (max_length <= GB_BASECASE || nthreads == 1) { // all vectors are sorted GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; } //========================================================================== // phase2: sort all long vectors in parallel //========================================================================== //-------------------------------------------------------------------------- // construct a list of vectors that must still be sorted //-------------------------------------------------------------------------- GB_cumsum (C_skip, ntasks, NULL, 1, Context) ; int64_t total_skipped = C_skip [ntasks] ; C_skipped = GB_MALLOC_WORK (total_skipped, int64_t, &C_skipped_size) ; if (C_skipped == NULL) { // out of memory GB_FREE_WORKSPACE ; return (GrB_OUT_OF_MEMORY) ; } #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { const int64_t kfirst = C_slice [tid] ; const int64_t klast = C_slice [tid+1] ; int64_t n_skipped = C_skip [tid] ; for (int64_t k = kfirst ; k < klast ; k++) { const int64_t pC_start = Cp [k] ; const int64_t pC_end = Cp [k+1] ; const int64_t cknz = pC_end - pC_start ; if (cknz > GB_BASECASE) { // C(:,k) was not sorted C_skipped [n_skipped++] = k ; } } } //-------------------------------------------------------------------------- // determine # of tasks for each vector in phase 2 //-------------------------------------------------------------------------- // determine the number of levels to create, which must always be an // even number. The # of levels is chosen to ensure that the # of leaves // of the task tree is between 4*nthreads and 16*nthreads. // 2 to 4 threads: 4 levels, 16 quicksort leaves // 5 to 16 threads: 6 levels, 64 quicksort leaves // 17 to 64 threads: 8 levels, 256 quicksort leaves // 65 to 256 threads: 10 levels, 1024 quicksort leaves // 256 to 1024 threads: 12 levels, 4096 quicksort leaves // ... 
int kk = (int) (2 + 2 * ceil (log2 ((double) nthreads) / 2)) ; int ntasks2 = 1 << kk ; // printf ("phase2: threads %d tasks %d skipped %ld\n", nthreads, ntasks2, // total_skipped) ; //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- W = GB_MALLOC_WORK (max_length + 6*ntasks2 + 1, int64_t, &W_size) ; W_0 = (GB_TYPE *) GB_MALLOC_WORK (max_length * GB_SIZE, GB_void, &W_0_size) ; if (W == NULL || W_0 == NULL) { // out of memory GB_FREE_WORKSPACE ; return (GrB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // sort each long vector using all available threads //-------------------------------------------------------------------------- for (int64_t t = 0 ; t < total_skipped ; t++) { const int64_t k = C_skipped [t] ; const int64_t pC_start = Cp [k] ; const int64_t pC_end = Cp [k+1] ; const int64_t cknz = pC_end - pC_start ; ASSERT (cknz > GB_BASECASE) ; GB_SORT (vector) (GB_ADDR (Cx, pC_start), Ci + pC_start, W_0, W, cknz, kk, ntasks2, nthreads #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORKSPACE ; C->jumbled = true ; ASSERT_MATRIX_OK (C, "C sorted by value", GB0) ; return (GrB_SUCCESS) ; } #undef GB_SORT #undef GB_TYPE
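The sketch below is not part of SuiteSparse:GraphBLAS; all names in it are illustrative. It strips the template above down to its core pattern: a Hoare-partition quicksort with a random pivot over a pair of arrays, where A_0 holds the keys and A_1 the companion indices used to break ties, with an insertion-sort base case for short runs. The template generalizes exactly this pattern to built-in, ISO, and user-defined types through the GB_GET/GB_LT/GB_SWAP macros.

#include <stdint.h>
#include <stdio.h>

/* illustrative sketch only: a concrete instance of the templated pattern above,
   specialized to int64_t keys (A_0) with companion tie-breaking indices (A_1) */

static inline void swap_pair (int64_t *A_0, int64_t *A_1, int64_t i, int64_t j)
{
    int64_t t0 = A_0 [i] ; A_0 [i] = A_0 [j] ; A_0 [j] = t0 ;
    int64_t t1 = A_1 [i] ; A_1 [i] = A_1 [j] ; A_1 [j] = t1 ;
}

/* (key, index) comparison; ties on the key are broken by the index */
static inline int less_pair (int64_t a0, int64_t a1, int64_t b0, int64_t b1)
{
    return (a0 < b0) || (a0 == b0 && a1 < b1) ;
}

/* Hoare partition with a random pivot: returns k so that A [0:k-1] <= A [k:n-1] */
static int64_t partition_pair (int64_t *A_0, int64_t *A_1, int64_t n, uint64_t *seed)
{
    (*seed) = (*seed) * 6364136223846793005ULL + 1442695040888963407ULL ;
    int64_t pivot = (int64_t) ((*seed >> 33) % (uint64_t) n) ;
    int64_t Pivot0 = A_0 [pivot], Pivot1 = A_1 [pivot] ;
    int64_t left = -1, right = n ;
    while (1)
    {
        do { left++  ; } while (less_pair (A_0 [left], A_1 [left], Pivot0, Pivot1)) ;
        do { right-- ; } while (less_pair (Pivot0, Pivot1, A_0 [right], A_1 [right])) ;
        if (left >= right) return (right + 1) ;
        swap_pair (A_0, A_1, left, right) ;
    }
}

static void quicksort_pair (int64_t *A_0, int64_t *A_1, int64_t n, uint64_t *seed)
{
    if (n < 20)
    {
        /* insertion sort for short runs, as in the template's base case */
        for (int64_t k = 1 ; k < n ; k++)
        {
            for (int64_t j = k ; j > 0 &&
                less_pair (A_0 [j], A_1 [j], A_0 [j-1], A_1 [j-1]) ; j--)
            {
                swap_pair (A_0, A_1, j-1, j) ;
            }
        }
    }
    else
    {
        int64_t k = partition_pair (A_0, A_1, n, seed) ;
        quicksort_pair (A_0, A_1, k, seed) ;             /* sort A [0:k-1] */
        quicksort_pair (A_0 + k, A_1 + k, n - k, seed) ; /* sort A [k:n-1] */
    }
}

int main (void)
{
    int64_t A_0 [8] = { 5, 1, 4, 1, 5, 9, 2, 6 } ;
    int64_t A_1 [8] = { 0, 1, 2, 3, 4, 5, 6, 7 } ;
    uint64_t seed = 42 ;
    quicksort_pair (A_0, A_1, 8, &seed) ;
    for (int i = 0 ; i < 8 ; i++)
    {
        printf ("(%lld,%lld) ", (long long) A_0 [i], (long long) A_1 [i]) ;
    }
    printf ("\n") ;
    return (0) ;
}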
conv3x3s1_winograd23_sse_dot.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // Copyright (C) 2019 BUG1989. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. // #include "option.h" #include "mat.h" namespace ncnn{ static void conv3x3s1_winograd23_sse_dot(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt, int inch, int outch, int outh, int outw) { // BEGIN dot Mat top_blob_tm = top_blob;; Mat bottom_blob_tm = bottom_blob;; { int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in Feathercnn int nRowBlocks = w_tm/4; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator); int nn_outch = outch >> 2; int remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p+1); Mat out2_tm = top_blob_tm.channel(p+2); Mat out3_tm = top_blob_tm.channel(p+3); const Mat kernel0_tm = kernel_tm.channel(p); const Mat kernel1_tm = kernel_tm.channel(p+1); const Mat kernel2_tm = kernel_tm.channel(p+2); const Mat kernel3_tm = kernel_tm.channel(p+3); for (int i=0; i<tiles; i++) { float* output0_tm = out0_tm.row(i); float* output1_tm = out1_tm.row(i); float* output2_tm = out2_tm.row(i); float* output3_tm = out3_tm.row(i); #if __AVX__ float zero_val = 0.f; __m256 _sum0 = _mm256_broadcast_ss(&zero_val); __m256 _sum0n = _mm256_broadcast_ss(&zero_val); __m256 _sum1 = _mm256_broadcast_ss(&zero_val); __m256 _sum1n = _mm256_broadcast_ss(&zero_val); __m256 _sum2 = _mm256_broadcast_ss(&zero_val); __m256 _sum2n = _mm256_broadcast_ss(&zero_val); __m256 _sum3 = _mm256_broadcast_ss(&zero_val); __m256 _sum3n = _mm256_broadcast_ss(&zero_val); int q = 0; for (; q+3<inch; q+=4) { const float* r0 = bottom_blob_tm.channel(q).row(i); const float* r1 = bottom_blob_tm.channel(q+1).row(i); const float* r2 = bottom_blob_tm.channel(q+2).row(i); const float* r3 = bottom_blob_tm.channel(q+3).row(i); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel1_tm.row(q); const float* k2 = kernel2_tm.row(q); const float* k3 = kernel3_tm.row(q); __m256 _r0 = _mm256_loadu_ps(r0); __m256 _r0n = _mm256_loadu_ps(r0+8); // k0 __m256 _k0 = _mm256_loadu_ps(k0); __m256 _k0n = _mm256_loadu_ps(k0+8); __m256 _k1 = _mm256_loadu_ps(k1); __m256 _k1n = _mm256_loadu_ps(k1+8); __m256 _k2 = _mm256_loadu_ps(k2); __m256 _k2n = _mm256_loadu_ps(k2+8); __m256 _k3 = _mm256_loadu_ps(k3); __m256 _k3n = _mm256_loadu_ps(k3+8); _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0); _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n); _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1); _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n); _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2); _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n); _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3); _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n); // k1 _r0 = 
_mm256_loadu_ps(r1); _r0n = _mm256_loadu_ps(r1+8); _k0 = _mm256_loadu_ps(k0+16); _k0n = _mm256_loadu_ps(k0+24); _k1 = _mm256_loadu_ps(k1+16); _k1n = _mm256_loadu_ps(k1+24); _k2 = _mm256_loadu_ps(k2+16); _k2n = _mm256_loadu_ps(k2+24); _k3 = _mm256_loadu_ps(k3+16); _k3n = _mm256_loadu_ps(k3+24); _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0); _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n); _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1); _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n); _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2); _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n); _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3); _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n); // k2 _r0 = _mm256_loadu_ps(r2); _r0n = _mm256_loadu_ps(r2+8); _k0 = _mm256_loadu_ps(k0+32); _k0n = _mm256_loadu_ps(k0+40); _k1 = _mm256_loadu_ps(k1+32); _k1n = _mm256_loadu_ps(k1+40); _k2 = _mm256_loadu_ps(k2+32); _k2n = _mm256_loadu_ps(k2+40); _k3 = _mm256_loadu_ps(k3+32); _k3n = _mm256_loadu_ps(k3+40); _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0); _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n); _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1); _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n); _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2); _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n); _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3); _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n); // k3 _r0 = _mm256_loadu_ps(r3); _r0n = _mm256_loadu_ps(r3+8); _k0 = _mm256_loadu_ps(k0+48); _k0n = _mm256_loadu_ps(k0+56); _k1 = _mm256_loadu_ps(k1+48); _k1n = _mm256_loadu_ps(k1+56); _k2 = _mm256_loadu_ps(k2+48); _k2n = _mm256_loadu_ps(k2+56); _k3 = _mm256_loadu_ps(k3+48); _k3n = _mm256_loadu_ps(k3+56); _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0); _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n); _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1); _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n); _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2); _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n); _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3); _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n); } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q).row(i); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel1_tm.row(q); const float* k2 = kernel2_tm.row(q); const float* k3 = kernel3_tm.row(q); __m256 _r0 = _mm256_loadu_ps(r0); __m256 _r0n = _mm256_loadu_ps(r0+8); __m256 _k0 = _mm256_loadu_ps(k0); __m256 _k0n = _mm256_loadu_ps(k0+8); __m256 _k1 = _mm256_loadu_ps(k1); __m256 _k1n = _mm256_loadu_ps(k1+8); __m256 _k2 = _mm256_loadu_ps(k2); __m256 _k2n = _mm256_loadu_ps(k2+8); __m256 _k3 = _mm256_loadu_ps(k3); __m256 _k3n = _mm256_loadu_ps(k3+8); _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0); _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n); _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1); _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n); _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2); _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n); _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3); _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n); } _mm256_storeu_ps(output0_tm, _sum0); _mm256_storeu_ps(output0_tm+8, _sum0n); _mm256_storeu_ps(output1_tm, _sum1); _mm256_storeu_ps(output1_tm+8, _sum1n); _mm256_storeu_ps(output2_tm, _sum2); _mm256_storeu_ps(output2_tm+8, _sum2n); _mm256_storeu_ps(output3_tm, _sum3); _mm256_storeu_ps(output3_tm+8, _sum3n); #else float sum0[16] = {0.0f}; float sum1[16] = {0.0f}; float sum2[16] = {0.0f}; float sum3[16] = {0.0f}; int q = 0; for (; q+3<inch; q+=4) { const float* r0 = bottom_blob_tm.channel(q).row(i); const float* r1 = bottom_blob_tm.channel(q+1).row(i); const float* r2 = bottom_blob_tm.channel(q+2).row(i); const float* r3 = 
bottom_blob_tm.channel(q+3).row(i); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel1_tm.row(q); const float* k2 = kernel2_tm.row(q); const float* k3 = kernel3_tm.row(q); for (int n=0; n<16; n++) { sum0[n] += r0[n] * k0[n]; k0 += 16; sum0[n] += r1[n] * k0[n]; k0 += 16; sum0[n] += r2[n] * k0[n]; k0 += 16; sum0[n] += r3[n] * k0[n]; k0 -= 16 * 3; sum1[n] += r0[n] * k1[n]; k1 += 16; sum1[n] += r1[n] * k1[n]; k1 += 16; sum1[n] += r2[n] * k1[n]; k1 += 16; sum1[n] += r3[n] * k1[n]; k1 -= 16 * 3; sum2[n] += r0[n] * k2[n]; k2 += 16; sum2[n] += r1[n] * k2[n]; k2 += 16; sum2[n] += r2[n] * k2[n]; k2 += 16; sum2[n] += r3[n] * k2[n]; k2 -= 16 * 3; sum3[n] += r0[n] * k3[n]; k3 += 16; sum3[n] += r1[n] * k3[n]; k3 += 16; sum3[n] += r2[n] * k3[n]; k3 += 16; sum3[n] += r3[n] * k3[n]; k3 -= 16 * 3; } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q).row(i); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel1_tm.row(q); const float* k2 = kernel2_tm.row(q); const float* k3 = kernel3_tm.row(q); for (int n=0; n<16; n++) { sum0[n] += r0[n] * k0[n]; sum1[n] += r0[n] * k1[n]; sum2[n] += r0[n] * k2[n]; sum3[n] += r0[n] * k3[n]; } } for (int n=0; n<16; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; } #endif } } #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int i=0; i<tiles; i++) { float* output0_tm = out0_tm.row(i); float sum0[16] = {0.0f}; int q = 0; for (; q+3<inch; q+=4) { const float* r0 = bottom_blob_tm.channel(q).row(i); const float* r1 = bottom_blob_tm.channel(q+1).row(i); const float* r2 = bottom_blob_tm.channel(q+2).row(i); const float* r3 = bottom_blob_tm.channel(q+3).row(i); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel0_tm.row(q+1); const float* k2 = kernel0_tm.row(q+2); const float* k3 = kernel0_tm.row(q+3); for (int n=0; n<16; n++) { sum0[n] += r0[n] * k0[n]; sum0[n] += r1[n] * k1[n]; sum0[n] += r2[n] * k2[n]; sum0[n] += r3[n] * k3[n]; } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q).row(i); const float* k0 = kernel0_tm.row(q); for (int n=0; n<16; n++) { sum0[n] += r0[n] * k0[n]; } } for (int n=0; n<16; n++) { output0_tm[n] = sum0[n]; } } } } } }
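As a reference for what the dot stage above computes, here is a plain scalar sketch with hypothetical helper names (not ncnn API): for one output channel and one tile, the 16-element transformed input tile is multiplied elementwise with the 16-element transformed kernel and accumulated over all input channels. The AVX branch above performs the same work with two 8-wide FMAs per channel while handling four output channels per iteration; the contiguous inch x 16 layout assumed here is illustrative, not ncnn's actual blob layout.

/* illustrative scalar sketch only (not ncnn API): the per-tile accumulation
   performed by the dot stage above, assuming the transformed input tile and
   the transformed kernel are each laid out as a contiguous inch x 16 block */
static void winograd23_dot_tile (const float* bottom_tile_tm, /* inch x 16 */
                                 const float* kernel_tile_tm, /* inch x 16 */
                                 int inch,
                                 float* output_tile_tm)       /* 16 */
{
    for (int n = 0; n < 16; n++)
        output_tile_tm[n] = 0.f;

    for (int q = 0; q < inch; q++)
    {
        const float* r = bottom_tile_tm + q * 16;
        const float* k = kernel_tile_tm + q * 16;
        for (int n = 0; n < 16; n++)
            output_tile_tm[n] += r[n] * k[n]; /* elementwise multiply-accumulate */
    }
}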
nmf_pgd.c
/* Generated by Cython 0.29.14 */ /* BEGIN: Cython Metadata { "distutils": { "depends": [], "language": "c", "name": "gensim.models.nmf_pgd", "sources": [ "gensim/models/nmf_pgd.pyx" ] }, "module_name": "gensim.models.nmf_pgd" } END: Cython Metadata */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_14" #define CYTHON_HEX_VERSION 0x001D0EF0 #define CYTHON_FUTURE_DIVISION 0 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef 
CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef 
CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) 
#else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #define PyObject_Unicode PyObject_Str #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : (Py_INCREF(func), func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__gensim__models__nmf_pgd #define __PYX_HAVE_API__gensim__models__nmf_pgd /* Early includes */ #include <math.h> #include "pythread.h" #include <string.h> #include <stdlib.h> #include <stdio.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= 
__FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
  "gensim/models/nmf_pgd.pyx",
  "stringsource",
};
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
typedef struct {
  struct __pyx_memoryview_obj *memview;
  char *data;
  Py_ssize_t shape[8];
  Py_ssize_t strides[8];
  Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m) (m.shape[0])
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) &&\
                    !defined(__i386__)
    #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
    #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using GNU atomics"
    #endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
    #include <Windows.h>
    #undef __pyx_atomic_int_type
    #define __pyx_atomic_int_type LONG
    #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #pragma message ("Using MSVC atomics")
    #endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
    #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using Intel atomics"
    #endif
#else
    #undef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 0
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Not using atomics"
    #endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
    #define __pyx_add_acquisition_count(memview)\
             __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
    #define __pyx_sub_acquisition_count(memview)\
            __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
    #define __pyx_add_acquisition_count(memview)\
            __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
    #define __pyx_sub_acquisition_count(memview)\
            __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
  #define __PYX_FORCE_INIT_THREADS 0
#endif
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
  const char* name;
  struct __Pyx_StructField_* fields;
  size_t size;
  size_t arraysize[8];
  int ndim;
  char typegroup;
  char is_unsigned;
  int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
  __Pyx_TypeInfo* type;
  const char* name;
  size_t offset;
} __Pyx_StructField;
typedef struct {
  __Pyx_StructField* field;
  size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
  __Pyx_StructField root;
  __Pyx_BufFmt_StackElem* head;
  size_t fmt_offset;
  size_t new_count, enc_count;
  size_t struct_alignment;
  int is_complex;
  char enc_type;
  char new_packmode;
  char enc_packmode;
  char is_valid_array;
} __Pyx_BufFmt_Context;
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
/* "View.MemoryView":105 * *
@cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":279 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define 
__Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, 
PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || 
PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T 
__Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* FastTypeChecks.proto 
*/ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* 
CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag); /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* 
CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'libc.math' */ /* Module declarations from 'gensim.models.nmf_pgd' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static double __pyx_f_6gensim_6models_7nmf_pgd_fmin(double, double); /*proto*/ static double __pyx_f_6gensim_6models_7nmf_pgd_fmax(double, double); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct 
__pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
'U' : 'I', IS_UNSIGNED(int), 0 }; #define __Pyx_MODULE_NAME "gensim.models.nmf_pgd" extern int __pyx_module_is_main_gensim__models__nmf_pgd; int __pyx_module_is_main_gensim__models__nmf_pgd = 0; /* Implementation of 'gensim.models.nmf_pgd' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_O[] = "O"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_h[] = "h"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_WtW[] = "WtW"; static const char __pyx_k_Wtv[] = "Wtv"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_grad[] = "grad"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_kappa[] = "kappa"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_hessian[] = "hessian"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_solve_h[] = "solve_h"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_n_samples[] = "n_samples"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_violation[] = "violation"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_sample_idx[] = "sample_idx"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_permutation[] = "permutation"; static const char __pyx_k_n_components[] = "n_components"; static const char __pyx_k_pyx_checksum[] = 
"__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_projected_grad[] = "projected_grad"; static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_component_idx_1[] = "component_idx_1"; static const char __pyx_k_component_idx_2[] = "component_idx_2"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_gensim_models_nmf_pgd[] = "gensim.models.nmf_pgd"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_gensim_models_nmf_pgd_pyx[] = "gensim/models/nmf_pgd.pyx"; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject 
*__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s_WtW; static PyObject *__pyx_n_s_Wtv; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_n_s_component_idx_1; static PyObject *__pyx_n_s_component_idx_2; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_gensim_models_nmf_pgd; static PyObject *__pyx_kp_s_gensim_models_nmf_pgd_pyx; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_grad; static PyObject *__pyx_n_s_h; static PyObject *__pyx_n_s_hessian; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_kappa; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_n_components; static PyObject *__pyx_n_s_n_samples; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_permutation; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_projected_grad; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_sample_idx; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static 
PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_solve_h; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_test; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_violation; static PyObject *__pyx_pf_6gensim_6models_7nmf_pgd_solve_h(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_h, __Pyx_memviewslice __pyx_v_Wtv, __Pyx_memviewslice __pyx_v_WtW, __Pyx_memviewslice __pyx_v_permutation, double __pyx_v_kappa); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, 
PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject 
*__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__15; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__22; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__26; static PyObject *__pyx_codeobj__20; static PyObject *__pyx_codeobj__27; /* Late includes */ /* "gensim/models/nmf_pgd.pyx":12 * from cython.parallel import prange * * cdef double fmin(double x, double y) nogil: # <<<<<<<<<<<<<< * return x if x < y else y * */ static double __pyx_f_6gensim_6models_7nmf_pgd_fmin(double __pyx_v_x, double __pyx_v_y) { double __pyx_r; double __pyx_t_1; /* "gensim/models/nmf_pgd.pyx":13 * * cdef double fmin(double x, double y) nogil: * return x if x < y else y # <<<<<<<<<<<<<< * * cdef double fmax(double x, double y) nogil: */ if (((__pyx_v_x < __pyx_v_y) != 0)) { __pyx_t_1 = __pyx_v_x; } else { __pyx_t_1 = __pyx_v_y; } __pyx_r = __pyx_t_1; goto __pyx_L0; /* "gensim/models/nmf_pgd.pyx":12 * from cython.parallel import prange * * cdef double fmin(double x, double y) nogil: # <<<<<<<<<<<<<< * return x if x < y else y * */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "gensim/models/nmf_pgd.pyx":15 * return x if x < y else y * * cdef double fmax(double x, double y) nogil: # <<<<<<<<<<<<<< * return x if x > y else y * */ static double __pyx_f_6gensim_6models_7nmf_pgd_fmax(double __pyx_v_x, double __pyx_v_y) { double __pyx_r; double __pyx_t_1; /* "gensim/models/nmf_pgd.pyx":16 * * cdef double fmax(double x, double y) nogil: * return x if x > y else y # <<<<<<<<<<<<<< * * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa): */ if (((__pyx_v_x > __pyx_v_y) != 0)) { __pyx_t_1 = __pyx_v_x; } else { __pyx_t_1 = __pyx_v_y; } __pyx_r = __pyx_t_1; goto __pyx_L0; /* "gensim/models/nmf_pgd.pyx":15 * return x if x < y else y * * cdef double fmax(double x, double y) nogil: # <<<<<<<<<<<<<< * return x if x > y else y * */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "gensim/models/nmf_pgd.pyx":18 * return x if x > y else y * * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa): # <<<<<<<<<<<<<< * 
"""Find optimal dense vector representation for current W and r matrices. * */ /* Python wrapper */ static PyObject *__pyx_pw_6gensim_6models_7nmf_pgd_1solve_h(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_6gensim_6models_7nmf_pgd_solve_h[] = "solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa)\nFind optimal dense vector representation for current W and r matrices.\n\n Parameters\n ----------\n h : matrix\n Dense representation of documents in current batch.\n Wtv : matrix\n WtW : matrix\n\n Returns\n -------\n float\n Cumulative difference between previous and current h vectors.\n\n "; static PyMethodDef __pyx_mdef_6gensim_6models_7nmf_pgd_1solve_h = {"solve_h", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_6gensim_6models_7nmf_pgd_1solve_h, METH_VARARGS|METH_KEYWORDS, __pyx_doc_6gensim_6models_7nmf_pgd_solve_h}; static PyObject *__pyx_pw_6gensim_6models_7nmf_pgd_1solve_h(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_h = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_Wtv = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_WtW = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_permutation = { 0, 0, { 0 }, { 0 }, { 0 } }; double __pyx_v_kappa; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("solve_h (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_h,&__pyx_n_s_Wtv,&__pyx_n_s_WtW,&__pyx_n_s_permutation,&__pyx_n_s_kappa,0}; PyObject* values[5] = {0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_h)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_Wtv)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, 1); __PYX_ERR(0, 18, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_WtW)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, 2); __PYX_ERR(0, 18, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_permutation)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, 3); __PYX_ERR(0, 18, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 4: if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_kappa)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, 4); __PYX_ERR(0, 18, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "solve_h") < 0)) __PYX_ERR(0, 18, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 5) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = 
PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); } __pyx_v_h = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_h.memview)) __PYX_ERR(0, 18, __pyx_L3_error) __pyx_v_Wtv = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_Wtv.memview)) __PYX_ERR(0, 18, __pyx_L3_error) __pyx_v_WtW = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_WtW.memview)) __PYX_ERR(0, 18, __pyx_L3_error) __pyx_v_permutation = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_permutation.memview)) __PYX_ERR(0, 18, __pyx_L3_error) __pyx_v_kappa = __pyx_PyFloat_AsDouble(values[4]); if (unlikely((__pyx_v_kappa == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 18, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("solve_h", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 18, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("gensim.models.nmf_pgd.solve_h", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_6gensim_6models_7nmf_pgd_solve_h(__pyx_self, __pyx_v_h, __pyx_v_Wtv, __pyx_v_WtW, __pyx_v_permutation, __pyx_v_kappa); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_6gensim_6models_7nmf_pgd_solve_h(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_h, __Pyx_memviewslice __pyx_v_Wtv, __Pyx_memviewslice __pyx_v_WtW, __Pyx_memviewslice __pyx_v_permutation, double __pyx_v_kappa) { Py_ssize_t __pyx_v_n_components; CYTHON_UNUSED Py_ssize_t __pyx_v_n_samples; double __pyx_v_violation; double __pyx_v_grad; double __pyx_v_projected_grad; double __pyx_v_hessian; Py_ssize_t __pyx_v_sample_idx; Py_ssize_t __pyx_v_component_idx_1; Py_ssize_t __pyx_v_component_idx_2; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; Py_ssize_t __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; Py_ssize_t __pyx_t_18; double __pyx_t_19; Py_ssize_t __pyx_t_20; Py_ssize_t __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; PyObject *__pyx_t_26 = NULL; __Pyx_RefNannySetupContext("solve_h", 0); /* "gensim/models/nmf_pgd.pyx":35 * """ * * cdef Py_ssize_t n_components = h.shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t n_samples = h.shape[1] * cdef double violation = 0 */ __pyx_v_n_components = (__pyx_v_h.shape[0]); /* "gensim/models/nmf_pgd.pyx":36 * * cdef Py_ssize_t n_components = h.shape[0] * cdef Py_ssize_t n_samples = h.shape[1] # <<<<<<<<<<<<<< * cdef double violation = 0 * cdef double grad, projected_grad, hessian */ __pyx_v_n_samples = (__pyx_v_h.shape[1]); /* "gensim/models/nmf_pgd.pyx":37 * cdef Py_ssize_t n_components = h.shape[0] * cdef Py_ssize_t n_samples = h.shape[1] * cdef double violation = 0 # <<<<<<<<<<<<<< * cdef double grad, projected_grad, hessian * cdef Py_ssize_t sample_idx = 0 */ __pyx_v_violation = 0.0; /* "gensim/models/nmf_pgd.pyx":39 * cdef double 
violation = 0 * cdef double grad, projected_grad, hessian * cdef Py_ssize_t sample_idx = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t component_idx_1 = 0 * cdef Py_ssize_t component_idx_2 = 0 */ __pyx_v_sample_idx = 0; /* "gensim/models/nmf_pgd.pyx":40 * cdef double grad, projected_grad, hessian * cdef Py_ssize_t sample_idx = 0 * cdef Py_ssize_t component_idx_1 = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t component_idx_2 = 0 * */ __pyx_v_component_idx_1 = 0; /* "gensim/models/nmf_pgd.pyx":41 * cdef Py_ssize_t sample_idx = 0 * cdef Py_ssize_t component_idx_1 = 0 * cdef Py_ssize_t component_idx_2 = 0 # <<<<<<<<<<<<<< * * for sample_idx in prange(n_samples, nogil=True): */ __pyx_v_component_idx_2 = 0; /* "gensim/models/nmf_pgd.pyx":43 * cdef Py_ssize_t component_idx_2 = 0 * * for sample_idx in prange(n_samples, nogil=True): # <<<<<<<<<<<<<< * for component_idx_1 in range(n_components): * component_idx_1 = permutation[component_idx_1] */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_1 = __pyx_v_n_samples; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_3 > 0) { #ifdef _OPENMP #pragma omp parallel reduction(+:__pyx_v_violation) private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, __pyx_t_25, __pyx_t_4, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_8, __pyx_t_9) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_component_idx_1) lastprivate(__pyx_v_component_idx_2) lastprivate(__pyx_v_grad) lastprivate(__pyx_v_hessian) lastprivate(__pyx_v_projected_grad) firstprivate(__pyx_v_sample_idx) lastprivate(__pyx_v_sample_idx) #endif /* _OPENMP */ for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ { __pyx_v_sample_idx = (Py_ssize_t)(0 + 1 * __pyx_t_2); /* Initialize private variables to invalid values */ __pyx_v_component_idx_1 = ((Py_ssize_t)0xbad0bad0); __pyx_v_component_idx_2 = ((Py_ssize_t)0xbad0bad0); __pyx_v_grad = ((double)__PYX_NAN()); __pyx_v_hessian = ((double)__PYX_NAN()); __pyx_v_projected_grad = ((double)__PYX_NAN()); /* "gensim/models/nmf_pgd.pyx":44 * * for sample_idx in prange(n_samples, nogil=True): * for component_idx_1 in range(n_components): # <<<<<<<<<<<<<< * component_idx_1 = permutation[component_idx_1] * */ __pyx_t_4 = __pyx_v_n_components; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_component_idx_1 = __pyx_t_6; /* "gensim/models/nmf_pgd.pyx":45 * for sample_idx in prange(n_samples, nogil=True): * for component_idx_1 in range(n_components): * component_idx_1 = permutation[component_idx_1] # <<<<<<<<<<<<<< * * grad = -Wtv[component_idx_1, sample_idx] */ __pyx_t_7 = __pyx_v_component_idx_1; __pyx_v_component_idx_1 = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_permutation.data) + __pyx_t_7)) ))); /* "gensim/models/nmf_pgd.pyx":47 * component_idx_1 = permutation[component_idx_1] * * grad = -Wtv[component_idx_1, sample_idx] # <<<<<<<<<<<<<< * * for component_idx_2 in range(n_components): */ __pyx_t_8 = __pyx_v_component_idx_1; __pyx_t_9 = __pyx_v_sample_idx; __pyx_v_grad = (-(*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_Wtv.data + __pyx_t_8 * __pyx_v_Wtv.strides[0]) ) + 
__pyx_t_9 * __pyx_v_Wtv.strides[1]) )))); /* "gensim/models/nmf_pgd.pyx":49 * grad = -Wtv[component_idx_1, sample_idx] * * for component_idx_2 in range(n_components): # <<<<<<<<<<<<<< * grad += WtW[component_idx_1, component_idx_2] * h[component_idx_2, sample_idx] * */ __pyx_t_10 = __pyx_v_n_components; __pyx_t_11 = __pyx_t_10; for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { __pyx_v_component_idx_2 = __pyx_t_12; /* "gensim/models/nmf_pgd.pyx":50 * * for component_idx_2 in range(n_components): * grad += WtW[component_idx_1, component_idx_2] * h[component_idx_2, sample_idx] # <<<<<<<<<<<<<< * * hessian = WtW[component_idx_1, component_idx_1] */ __pyx_t_13 = __pyx_v_component_idx_1; __pyx_t_14 = __pyx_v_component_idx_2; __pyx_t_15 = __pyx_v_component_idx_2; __pyx_t_16 = __pyx_v_sample_idx; __pyx_v_grad = (__pyx_v_grad + ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_WtW.data + __pyx_t_13 * __pyx_v_WtW.strides[0]) )) + __pyx_t_14)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_h.data + __pyx_t_15 * __pyx_v_h.strides[0]) )) + __pyx_t_16)) ))))); } /* "gensim/models/nmf_pgd.pyx":52 * grad += WtW[component_idx_1, component_idx_2] * h[component_idx_2, sample_idx] * * hessian = WtW[component_idx_1, component_idx_1] # <<<<<<<<<<<<<< * * grad = grad * kappa / hessian */ __pyx_t_17 = __pyx_v_component_idx_1; __pyx_t_18 = __pyx_v_component_idx_1; __pyx_v_hessian = (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_WtW.data + __pyx_t_17 * __pyx_v_WtW.strides[0]) )) + __pyx_t_18)) ))); /* "gensim/models/nmf_pgd.pyx":54 * hessian = WtW[component_idx_1, component_idx_1] * * grad = grad * kappa / hessian # <<<<<<<<<<<<<< * * projected_grad = fmin(0, grad) if h[component_idx_1, sample_idx] == 0 else grad */ __pyx_v_grad = ((__pyx_v_grad * __pyx_v_kappa) / __pyx_v_hessian); /* "gensim/models/nmf_pgd.pyx":56 * grad = grad * kappa / hessian * * projected_grad = fmin(0, grad) if h[component_idx_1, sample_idx] == 0 else grad # <<<<<<<<<<<<<< * * violation += projected_grad * projected_grad */ __pyx_t_20 = __pyx_v_component_idx_1; __pyx_t_21 = __pyx_v_sample_idx; if ((((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_h.data + __pyx_t_20 * __pyx_v_h.strides[0]) )) + __pyx_t_21)) ))) == 0.0) != 0)) { __pyx_t_19 = __pyx_f_6gensim_6models_7nmf_pgd_fmin(0.0, __pyx_v_grad); } else { __pyx_t_19 = __pyx_v_grad; } __pyx_v_projected_grad = __pyx_t_19; /* "gensim/models/nmf_pgd.pyx":58 * projected_grad = fmin(0, grad) if h[component_idx_1, sample_idx] == 0 else grad * * violation += projected_grad * projected_grad # <<<<<<<<<<<<<< * * h[component_idx_1, sample_idx] = fmax(h[component_idx_1, sample_idx] - grad, 0.) */ __pyx_v_violation = (__pyx_v_violation + (__pyx_v_projected_grad * __pyx_v_projected_grad)); /* "gensim/models/nmf_pgd.pyx":60 * violation += projected_grad * projected_grad * * h[component_idx_1, sample_idx] = fmax(h[component_idx_1, sample_idx] - grad, 0.) 
# <<<<<<<<<<<<<< * * return sqrt(violation) */ __pyx_t_22 = __pyx_v_component_idx_1; __pyx_t_23 = __pyx_v_sample_idx; __pyx_t_24 = __pyx_v_component_idx_1; __pyx_t_25 = __pyx_v_sample_idx; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_h.data + __pyx_t_24 * __pyx_v_h.strides[0]) )) + __pyx_t_25)) )) = __pyx_f_6gensim_6models_7nmf_pgd_fmax(((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_h.data + __pyx_t_22 * __pyx_v_h.strides[0]) )) + __pyx_t_23)) ))) - __pyx_v_grad), 0.); } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "gensim/models/nmf_pgd.pyx":43 * cdef Py_ssize_t component_idx_2 = 0 * * for sample_idx in prange(n_samples, nogil=True): # <<<<<<<<<<<<<< * for component_idx_1 in range(n_components): * component_idx_1 = permutation[component_idx_1] */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "gensim/models/nmf_pgd.pyx":62 * h[component_idx_1, sample_idx] = fmax(h[component_idx_1, sample_idx] - grad, 0.) * * return sqrt(violation) # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __pyx_t_26 = PyFloat_FromDouble(sqrt(__pyx_v_violation)); if (unlikely(!__pyx_t_26)) __PYX_ERR(0, 62, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_26); __pyx_r = __pyx_t_26; __pyx_t_26 = 0; goto __pyx_L0; /* "gensim/models/nmf_pgd.pyx":18 * return x if x > y else y * * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa): # <<<<<<<<<<<<<< * """Find optimal dense vector representation for current W and r matrices. 
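 *
 *    A compact sketch of the per-component update performed by the generated loop above,
 *    using i, j, s as shorthand for component_idx_1, component_idx_2, sample_idx from the
 *    echoed .pyx source (a reading aid only, kept inside this comment):
 *
 *        grad      = ( -Wtv[i, s] + sum_j WtW[i, j] * h[j, s] ) * kappa / WtW[i, i]
 *        pg        = fmin(0, grad) if h[i, s] == 0 else grad        (projected gradient)
 *        violation = violation + pg * pg
 *        h[i, s]   = fmax(h[i, s] - grad, 0.)
 *
 *    After sweeping all samples (in parallel via prange) the function returns
 *    sqrt(violation), the cumulative change measure described in the docstring.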
* */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_26); __Pyx_AddTraceback("gensim.models.nmf_pgd.solve_h", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_h, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Wtv, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_WtW, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_permutation, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) 
&& PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) } else { /* "View.MemoryView":123 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":129 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 129, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":130 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 133, __pyx_L1_error) /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 136, __pyx_L1_error) /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":139 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_6) ? 
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":140 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) __pyx_t_3 = __pyx_v_format; __Pyx_INCREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":141 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(1, 141, __pyx_L1_error) } __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) __pyx_v_self->format = __pyx_t_7; /* "View.MemoryView":144 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":145 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 148, __pyx_L1_error) /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ __pyx_t_8 = 0; __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_9; __pyx_v_idx = __pyx_t_8; __pyx_t_8 = (__pyx_t_8 + 1); /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":153 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 153, __pyx_L1_error) /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":154 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":158 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":159 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) if (likely(__pyx_t_4)) { /* "View.MemoryView":161 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":162 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":164 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 164, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":166 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":169 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":170 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 
170, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":174 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 176, __pyx_L1_error) /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":179 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":180 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 180, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 180, __pyx_L1_error) } __pyx_t_1 = (__pyx_v_self->len / __pyx_v_itemsize); __pyx_t_9 = __pyx_t_1; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "View.MemoryView":181 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":182 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":122 * cdef bint 
dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":186 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":188 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) __pyx_t_1 = 
(__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 192, __pyx_L1_error) /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":193 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":194 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":195 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":196 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":197 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":198 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":199 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":200 * info.suboffsets = NULL 
* info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":203 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":205 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":207 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":214 * if 
self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":216 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":218 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":219 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":223 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); 
__pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":227 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":228 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":231 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * def __getattr__(self, attr): */ __pyx_r = 
(__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":234 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":237 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":240 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; 
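/* __reduce_cython__ / __setstate_cython__ (below) unconditionally raise
   TypeError: array instances are not picklable, because __cinit__ performs a
   non-trivial buffer allocation that a default __reduce__ cannot recreate. */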
__Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, 
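/* buf: optional externally owned data pointer; when NULL the array allocates
   its own storage, otherwise the new array wraps buf without copying (see the
   allocate_buffer=False branch below). */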
char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":249 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":252 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) 
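/* Build the keyword dict {'allocate_buffer': False} for the constructor call
   above, so the wrapping array does not allocate storage of its own; the
   caller-supplied buf is attached to result.data just below. */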
__Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":253 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":255 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = 
__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":282 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":284 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_v_state = 0; PyObject *__pyx_v__dict = 0; int __pyx_v_use_setstate; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":5 * cdef object _dict * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, 
'__dict__', None) * if _dict is not None: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":6 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":8 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":9 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":11 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":13 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":17 * return __pyx_unpickle_Enum, 
(type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":300 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":304 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":307 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":309 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int 
__pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":346 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":347 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = 
flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":349 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":351 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":352 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":356 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":357 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":355 * * global 
__pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":361 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":364 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":366 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":368 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":370 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) 
* self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyThread_type_lock __pyx_t_6; PyThread_type_lock __pyx_t_7; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":374 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * elif (<__pyx_buffer *> &self.view).obj == Py_None: * */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ goto __pyx_L3; } /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ __pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0); if (__pyx_t_2) { /* "View.MemoryView":377 * elif (<__pyx_buffer *> &self.view).obj == Py_None: * * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< * Py_DECREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; /* "View.MemoryView":378 * * (<__pyx_buffer *> &self.view).obj = NULL * Py_DECREF(Py_None) # <<<<<<<<<<<<<< * * cdef int i */ Py_DECREF(Py_None); /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ } __pyx_L3:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":383 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in 
range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":385 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":388 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":387 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":389 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":391 * break * 
else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":395 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 397, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 
= (__pyx_t_1 + 1); /* "View.MemoryView":398 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":400 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":405 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":407 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 407, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":411 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":413 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":414 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* Python wrapper */ static int 
__pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ __pyx_t_1 = (__pyx_v_self->view.readonly != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 418, __pyx_L1_error) /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ } /* "View.MemoryView":420 * raise TypeError("Cannot assign to read-only memoryview") * * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(__pyx_t_2 != Py_None)) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 420, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_1 = 
__Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":423 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_obj = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":425 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L5; } /* "View.MemoryView":427 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error) __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L5:; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L4; } /* "View.MemoryView":429 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4:; /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); 
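/* Note: this is the standard error path emitted for __setitem__ -- any
   temporaries still held are released, a traceback frame naming
   "View.MemoryView.memoryview.__setitem__" is recorded, and -1 is returned
   so the pending exception propagates to the caller. */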
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":435 * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); 
__pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; /* "View.MemoryView":436 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":437 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":439 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; __Pyx_memviewslice *__pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || 
likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error) __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":446 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error) __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error) /* "View.MemoryView":447 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":445 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error) /* "View.MemoryView":441 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; char const *__pyx_t_6; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; 
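/* Note: setitem_slice_assign_scalar broadcasts a single scalar over a whole
   destination slice.  The value is first packed into temporary storage --
   the 128-int stack buffer declared above, or a PyMem_Malloc'ed block when
   self.view.itemsize is larger -- and slice_assign_scalar then copies that
   one item into every element of dst; the finally block frees the temporary
   with PyMem_Free. */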
PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; PyObject *__pyx_t_12 = NULL; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":451 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":456 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error) __pyx_v_dst_slice = __pyx_t_1; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_2) { /* "View.MemoryView":459 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":461 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error) /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":462 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":464 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":466 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_2) { /* "View.MemoryView":468 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":470 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview 
*)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L8:; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":475 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":476 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":479 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":482 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":483 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":481 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":488 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":491 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp 
+ 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":493 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":498 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ 
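/* Note: struct.unpack always returns a tuple, so for single-character
   formats the code below returns result[0], letting scalar items read back
   as plain Python objects instead of 1-tuples. */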
__Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":499 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":494 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 495, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); 
__Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":504 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":510 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":512 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * 
for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 514, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } 
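/* Note: the loop above writes the bytes produced by struct.pack into the
   item's storage one char at a time; the net effect is the same as
   memcpy(itemp, bytesvalue, len(bytesvalue)). */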
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; char *__pyx_t_5; void *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = (__pyx_v_self->view.readonly != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 520, __pyx_L1_error) /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # 
<<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ } /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); if (__pyx_t_1) { /* "View.MemoryView":523 * * if flags & PyBUF_ND: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_4 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_4; /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L6; } /* "View.MemoryView":525 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L6:; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":528 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_4 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_4; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L7; } /* "View.MemoryView":530 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L7:; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":533 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_4 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_4; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L8; } /* "View.MemoryView":535 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L8:; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":538 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_5 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_5; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L9; } /* "View.MemoryView":540 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L9:; /* "View.MemoryView":542 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_6 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_6; /* 
"View.MemoryView":543 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_7 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_7; /* "View.MemoryView":544 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = self.view.readonly */ __pyx_t_8 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_8; /* "View.MemoryView":545 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = self.view.readonly * info.obj = self */ __pyx_t_8 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_8; /* "View.MemoryView":546 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = self.view.readonly # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_t_1 = __pyx_v_self->view.readonly; __pyx_v_info->readonly = __pyx_t_1; /* "View.MemoryView":547 * info.len = self.view.len * info.readonly = self.view.readonly * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":554 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":555 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error) /* "View.MemoryView":556 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":560 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; 
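/* Note: the shape getter below walks self.view.shape[:self.view.ndim] and
   returns the extents as a Python tuple; the strides getter that follows
   does the same with self.view.strides, first raising ValueError when the
   buffer does not expose strides. */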
Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":564 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 570, __pyx_L1_error) /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":572 * raise ValueError("Buffer 
view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__12, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":579 * return (-1,) * 
self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":583 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = 
__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":587 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":590 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":591 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":590 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ 
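/* Illustrative note: the size property defined below is computed lazily.  On first
   access it multiplies the extents in self.view.shape[:ndim] together and caches the
   product in self._size; subsequent accesses return the cached object.  nbytes (above)
   is then size * view.itemsize.  For example, a 3x4 view with an 8-byte itemsize would
   report size == 12 and nbytes == 96 (example values under that assumption, not taken
   from the generated code). */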
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":596 * def size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":598 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":599 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":601 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":603 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r 
= __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":607 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":609 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":613 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 
612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":616 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":619 * * * def is_c_contig(self): # 
<<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":622 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":623 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":628 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # 
<<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":629 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":633 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":635 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":636 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":641 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":645 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":647 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":648 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":653 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); 
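/* Illustrative note: copy() and copy_fortran() follow the same pattern: slice_copy()
   snapshots this view into a stack __Pyx_memviewslice, copy_new_contig() builds a
   C-contiguous ("c") or Fortran-contiguous ("fortran") copy of the data, and
   memoryview_copy_from_slice() wraps the result in a new memoryview object.  Roughly,
   m.copy() is a C-ordered deep copy and m.copy_fortran() a Fortran-ordered one; this
   is a paraphrase of the code above, not an additional guarantee. */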
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) 
__PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":658 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":659 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":660 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
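/* Illustrative note: this is the shared error path of memoryview_cwrapper
   (cname __pyx_memoryview_new).  On failure the temporaries created while building the
   (o, flags, dtype_is_object) argument tuple are released, a traceback entry is
   recorded, and 0 (NULL) is returned; the normal path instead attaches the typeinfo
   pointer to the freshly constructed memoryview and returns it. */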
__Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":664 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":671 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":672 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":671 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":674 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":676 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":677 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":678 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 679, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not 
seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":683 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":685 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__15); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":686 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":688 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":689 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(1, 689, __pyx_L1_error) /* "View.MemoryView":688 * have_slices = True * else: * if not 
isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":691 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":692 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":694 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":696 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__15); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":698 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L0; /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":701 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":702 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in 
suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 703, __pyx_L1_error) /* "View.MemoryView":702 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":711 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":718 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); /* "View.MemoryView":722 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 722, __pyx_L1_error) } } #endif /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * 
if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":725 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":726 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":728 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":729 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":735 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":736 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":741 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":742 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= 
PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 746, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":751 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error) /* "View.MemoryView":748 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error) /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":755 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":756 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":757 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":758 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ 
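/* Descriptive note (editorial, not generated by Cython): the else branch that
   follows handles a slice object.  The generated code reads index.start,
   index.stop and index.step with an "or 0" default (View.MemoryView lines
   760-762) and separately records have_start/have_stop/have_step, so that
   slice_memviewslice() can tell an explicit 0 apart from an omitted bound. */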
goto __pyx_L6; } /* "View.MemoryView":760 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":761 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":762 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":764 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":765 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":766 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if 
(unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":768 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error) /* "View.MemoryView":774 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":778 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) } /* "View.MemoryView":779 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) } /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { 
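/* Descriptive note (editorial): when the source memoryview is not a
   _memoryviewslice there are no to_object_func/to_dtype_func converters to
   propagate, so the new view is built with NULL for both (the
   memoryview_fromslice call for View.MemoryView line 782 below). */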
__Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":783 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":830 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":832 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error) /* "View.MemoryView":831 * if start < 0: * start += shape 
* if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":835 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":838 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error) /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":843 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":845 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":848 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":850 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } 
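/* Descriptive note (editorial): the have_start clamping above follows Python
   slice semantics: a negative start is wrapped once by adding shape, then
   clamped to 0; a start at or beyond the end is clamped to shape-1 when the
   step is negative (so iteration can begin at the last element) and to shape
   otherwise (yielding an empty slice).  The have_stop and default-step
   handling below applies the matching rules for the other slice bounds. */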
__pyx_L14:; /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":853 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":855 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":859 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":861 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":863 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":866 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":868 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":871 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":875 * * with 
cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":878 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":881 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":884 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":885 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":886 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":890 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":892 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":897 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, 
"All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":899 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":900 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":902 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":904 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":912 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":913 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * 
stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":917 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 917, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 917, __pyx_L1_error) } __pyx_v_shape = (__pyx_v_view->len / __pyx_v_itemsize); /* "View.MemoryView":918 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":920 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":921 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":923 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":926 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":928 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
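/* Descriptive note (editorial): a negative index has been wrapped once by
   adding view.shape[dim]; if it is still negative, the IndexError built above
   is raised here, and the same bounds error is raised further below when
   index >= shape.  A valid index resolves to bufp + index*stride, followed
   through the suboffset when the dimension is indirect (suboffset >= 0). */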
__Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 928, __pyx_L1_error) /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":931 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 931, __pyx_L1_error) /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":933 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":935 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":937 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int 
__pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; /* "View.MemoryView":944 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":946 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":947 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":951 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = (__pyx_v_ndim / 2); __pyx_t_4 = __pyx_t_3; for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":952 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":953 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; /* "View.MemoryView":954 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L6_bool_binop_done; } __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L6_bool_binop_done:; if (__pyx_t_7) { /* "View.MemoryView":957 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error) /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* 
"View.MemoryView":959 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":977 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":981 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":983 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef 
assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":987 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error) /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":989 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject 
*__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":993 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject 
*__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":1008 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1013 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1015 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1016 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1018 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1019 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1021 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1022 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1023 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1024 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1025 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: */ Py_INCREF(Py_None); /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # 
<<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); if (__pyx_t_1) { /* "View.MemoryView":1028 * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * else: * result.flags = PyBUF_RECORDS_RO */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ goto __pyx_L4; } /* "View.MemoryView":1030 * result.flags = PyBUF_RECORDS * else: * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ /*else*/ { __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; } __pyx_L4:; /* "View.MemoryView":1032 * result.flags = PyBUF_RECORDS_RO * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1033 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1036 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1037 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1039 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1040 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L6_break; /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L6_break:; /* "View.MemoryView":1042 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1043 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # 
<<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1044 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1046 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1047 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1049 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1056 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # 
<<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1057 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1059 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1060 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1067 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1068 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1069 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1071 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1072 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char 
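/* Editorial note (annotation): get_slice_from_memview, a little above, returns a pointer
 * straight into obj.from_slice when the memoryview is already a _memoryviewslice, and
 * otherwise fills the caller-supplied mslice via slice_copy.  slice_copy, the function this
 * comment sits in, snapshots the Py_buffer's shape and strides per dimension and records -1
 * for suboffsets when the view has no suboffsets array, so downstream code can treat every
 * slice uniformly. */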
*)__pyx_v_memview->view.buf); /* "View.MemoryView":1074 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_dim = __pyx_t_4; /* "View.MemoryView":1075 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1076 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1077 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_5 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1083 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1084 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given 
memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1095 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1096 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1098 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1099 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1101 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1103 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1111 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1113 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. */ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1121 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1122 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1124 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1126 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1127 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1129 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_1; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1130 * * for i in range(ndim): * if 
mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1131 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1132 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1135 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1137 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; /* "View.MemoryView":1147 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1148 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1149 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1150 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* 
"View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1154 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1155 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1157 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1158 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); /* "View.MemoryView":1159 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1160 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1162 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = 
__pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1163 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1167 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1168 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1173 * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; /* "View.MemoryView":1179 * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for shape in src.shape[:ndim]: */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1181 * cdef Py_ssize_t shape, size = src.memview.view.itemsize * * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< * size *= shape * */ __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_shape = (__pyx_t_2[0]); /* "View.MemoryView":1182 * * for shape in src.shape[:ndim]: * size *= shape # 
<<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * __pyx_v_shape); } /* "View.MemoryView":1184 * size *= shape * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1197 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_idx = __pyx_t_4; /* "View.MemoryView":1198 * if order == 'F': * for idx in range(ndim): * strides[idx] = stride # <<<<<<<<<<<<<< * stride *= shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1199 * for idx in range(ndim): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1201 * stride *= shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1202 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride *= shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1203 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1205 * stride *= shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice 
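/* Editorial note (annotation): copy_data_to_temp, whose definition starts here, allocates a
 * scratch buffer of slice_get_size(src, ndim) bytes, points tmpslice at it, gives it
 * contiguous strides in the requested order via fill_contig_strides_array (zeroing the
 * stride of any length-1 dimension), and then fills it either with one memcpy, when src is
 * already contiguous in that order, or with copy_strided_to_strided otherwise. */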
*__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_t_6; /* "View.MemoryView":1219 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1220 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1222 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1224 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error) /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* "View.MemoryView":1227 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1228 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1229 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1230 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1231 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1233 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); /* "View.MemoryView":1237 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] 
= 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1239 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1242 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1244 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1246 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1254 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); 
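/* Editorial note (annotation): _err_extents and the _err_dim/_err helpers that follow are
 * the "with gil" error paths used by the nogil copy routines above; each one re-acquires
 * the GIL (PyGILState_Ensure under WITH_THREAD), builds the Python exception, raises it,
 * and reports -1 so the caller can propagate the failure. */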
__Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1253 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 1253, __pyx_L1_error) /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1258 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 1258, __pyx_L1_error) /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":1263 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1263, __pyx_L1_error) /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1265 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1265, __pyx_L1_error) } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; void *__pyx_t_7; int __pyx_t_8; /* "View.MemoryView":1276 * Check for overlapping memory and verify the shapes. 
* """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1277 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1279 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1280 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1281 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1285 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1287 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1289 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1291 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_5; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) 
!= 0); if (__pyx_t_2) { /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1294 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1295 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1297 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error) /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1305 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1307 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) 
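/* Note on the overlap path: when slices_overlap() reports that src and dst
   share memory, the source data is first copied into a freshly allocated
   contiguous buffer via copy_data_to_temp() and src is repointed at that
   temporary (src = tmp); the buffer pointer is kept in tmpdata and freed on
   the successful return paths below. */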
__PYX_ERR(1, 1307, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_7; /* "View.MemoryView":1308 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1314 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1316 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1320 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1321 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); /* "View.MemoryView":1322 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1323 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, 
dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1324 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_8 = (__pyx_t_2 != 0); if (__pyx_t_8) { /* "View.MemoryView":1329 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error) /* "View.MemoryView":1330 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error) /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1332 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1333 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1334 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1336 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1337 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void 
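/* broadcast_leading: prepends offset = ndim_other - ndim leading dimensions
   so that src and dst end up with the same rank. Existing shape, strides and
   suboffsets entries are shifted up by offset; each new leading dimension is
   given shape 1, the stride of dimension 0, and suboffset -1, i.e. a direct,
   broadcastable axis. */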
__pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1344 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1346 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1347 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1348 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1349 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1351 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1352 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1353 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1354 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1367 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # 
<<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1374 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1381 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_4) { /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_4 = (__pyx_v_inc != 0); if (__pyx_t_4) { /* "View.MemoryView":1384 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1386 * 
Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1388 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1389 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1391 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1400 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1401 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1403 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t 
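/* _slice_assign_scalar: recursive scalar fill. For the innermost dimension
   (ndim == 1) it memcpy()s item (itemsize bytes) into each of the extent
   elements, advancing data by strides[0]; for higher dimensions it recurses
   with shape + 1 and strides + 1, stepping the outer pointer by strides[0]
   per iteration. */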
__pyx_t_3; Py_ssize_t __pyx_t_4; /* "View.MemoryView":1411 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1412 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1415 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1416 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); /* "View.MemoryView":1417 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1419 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1420 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1422 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 
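/* __pyx_unpickle_Enum wrapper: unpacks the three arguments __pyx_type,
   __pyx_checksum and __pyx_state from positional or keyword form, converts
   the checksum to a C long, and forwards to the implementation function
   below. */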
0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = 0; PyObject *__pyx_v___pyx_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":5 * cdef object __pyx_result * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 
= (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 6, __pyx_L1_error) /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":7 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? 
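/* This call evaluates Enum.__new__(__pyx_type), using the "new" attribute
   looked up on __pyx_MemviewEnum_type above; further down, __pyx_state is
   applied to the new object through __pyx_unpickle_Enum__set_state() when
   the state is not None. */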
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_6 = (__pyx_t_1 != 0); if (__pyx_t_6) { /* "(tree fragment)":9 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":10 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); /* "(tree 
fragment)":12 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 13, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":14 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 14, __pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_1 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_array___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { {"__getattr__", 
(PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "gensim.models.nmf_pgd.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if 
(PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "gensim.models.nmf_pgd.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { 
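/* The pending exception state, if any, is saved with PyErr_Fetch() and
   restored with PyErr_Restore() so it is not clobbered while
   __pyx_memoryview___dealloc__ runs; the temporary refcount bump keeps the
   object alive for the duration of that call. */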
PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, 
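/* Together with the getset table that follows, this method table exposes the
   memoryview's Python-level API: is_c_contig(), is_f_contig(), copy(),
   copy_fortran(), the pickling helpers, and read-only properties such as T,
   base, shape, strides, suboffsets, ndim, itemsize, nbytes and size. */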
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "gensim.models.nmf_pgd.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice 
__pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "gensim.models.nmf_pgd._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, 
/*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_nmf_pgd(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_nmf_pgd}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "nmf_pgd", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, 
__pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s_WtW, __pyx_k_WtW, sizeof(__pyx_k_WtW), 0, 0, 1, 1}, {&__pyx_n_s_Wtv, __pyx_k_Wtv, sizeof(__pyx_k_Wtv), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_n_s_component_idx_1, __pyx_k_component_idx_1, sizeof(__pyx_k_component_idx_1), 0, 0, 1, 1}, {&__pyx_n_s_component_idx_2, __pyx_k_component_idx_2, sizeof(__pyx_k_component_idx_2), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_gensim_models_nmf_pgd, __pyx_k_gensim_models_nmf_pgd, sizeof(__pyx_k_gensim_models_nmf_pgd), 0, 0, 1, 1}, {&__pyx_kp_s_gensim_models_nmf_pgd_pyx, __pyx_k_gensim_models_nmf_pgd_pyx, sizeof(__pyx_k_gensim_models_nmf_pgd_pyx), 0, 0, 1, 0}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_grad, __pyx_k_grad, sizeof(__pyx_k_grad), 0, 0, 1, 1}, {&__pyx_n_s_h, __pyx_k_h, sizeof(__pyx_k_h), 0, 0, 1, 1}, {&__pyx_n_s_hessian, __pyx_k_hessian, sizeof(__pyx_k_hessian), 0, 0, 1, 1}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 
0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_kappa, __pyx_k_kappa, sizeof(__pyx_k_kappa), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_n_components, __pyx_k_n_components, sizeof(__pyx_k_n_components), 0, 0, 1, 1}, {&__pyx_n_s_n_samples, __pyx_k_n_samples, sizeof(__pyx_k_n_samples), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_permutation, __pyx_k_permutation, sizeof(__pyx_k_permutation), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_projected_grad, __pyx_k_projected_grad, sizeof(__pyx_k_projected_grad), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_sample_idx, __pyx_k_sample_idx, sizeof(__pyx_k_sample_idx), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_solve_h, __pyx_k_solve_h, sizeof(__pyx_k_solve_h), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, 
sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_violation, __pyx_k_violation, sizeof(__pyx_k_violation), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 44, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* 
"View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 495, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__12 = PyTuple_New(1); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); 
PyTuple_SET_ITEM(__pyx_tuple__12, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__12); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__15 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__15)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "gensim/models/nmf_pgd.pyx":18 * return x if x > y else y * * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa): # <<<<<<<<<<<<<< * """Find optimal dense vector representation for current W and r matrices. 
* */ __pyx_tuple__19 = PyTuple_Pack(14, __pyx_n_s_h, __pyx_n_s_Wtv, __pyx_n_s_WtW, __pyx_n_s_permutation, __pyx_n_s_kappa, __pyx_n_s_n_components, __pyx_n_s_n_samples, __pyx_n_s_violation, __pyx_n_s_grad, __pyx_n_s_projected_grad, __pyx_n_s_hessian, __pyx_n_s_sample_idx, __pyx_n_s_component_idx_1, __pyx_n_s_component_idx_2); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(0, 18, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); __pyx_codeobj__20 = (PyObject*)__Pyx_PyCode_New(5, 0, 14, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__19, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_gensim_models_nmf_pgd_pyx, __pyx_n_s_solve_h, 18, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__20)) __PYX_ERR(0, 18, __pyx_L1_error) /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_tuple__26 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); __Pyx_GIVEREF(__pyx_tuple__26); __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) 
__PYX_ERR(1, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_array.tp_print = 0; #endif if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_MemviewEnum.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) __pyx_MemviewEnum_type = 
&__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryview.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryviewslice.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } #if PY_MAJOR_VERSION < 3 #ifdef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC void #else #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #endif #else #ifdef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyObject * #else #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initnmf_pgd(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initnmf_pgd(void) #else __Pyx_PyMODINIT_FUNC PyInit_nmf_pgd(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_nmf_pgd(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? -1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_nmf_pgd(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; static PyThread_type_lock __pyx_t_2[8]; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; 
PyErr_SetString(PyExc_RuntimeError, "Module 'nmf_pgd' has already been imported. Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_nmf_pgd(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("nmf_pgd", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_b); __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_cython_runtime); if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_gensim__models__nmf_pgd) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "gensim.models.nmf_pgd")) { if (unlikely(PyDict_SetItemString(modules, "gensim.models.nmf_pgd", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) goto __pyx_L1_error; /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) goto __pyx_L1_error; /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() != 0)) goto __pyx_L1_error; (void)__Pyx_modinit_type_import_code(); (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "gensim/models/nmf_pgd.pyx":18 * return x if x > y else y * * def solve_h(double[:, ::1] h, double[:, :] Wtv, double[:, ::1] WtW, int[::1] permutation, double kappa): # <<<<<<<<<<<<<< * """Find optimal dense vector representation for current W and r matrices. * */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_6gensim_6models_7nmf_pgd_1solve_h, NULL, __pyx_n_s_gensim_models_nmf_pgd); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_solve_h, __pyx_t_1) < 0) __PYX_ERR(0, 18, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "gensim/models/nmf_pgd.pyx":1 * # Author: Timofey Yefimov # <<<<<<<<<<<<<< * * # cython: cdivision=True */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":209 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":287 
* * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_2[0] = PyThread_allocate_lock(); __pyx_t_2[1] = PyThread_allocate_lock(); __pyx_t_2[2] = PyThread_allocate_lock(); __pyx_t_2[3] = PyThread_allocate_lock(); __pyx_t_2[4] = PyThread_allocate_lock(); __pyx_t_2[5] = PyThread_allocate_lock(); __pyx_t_2[6] = PyThread_allocate_lock(); __pyx_t_2[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":549 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":995 * return self.from_object * * __pyx_getbuffer = 
capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init gensim.models.nmf_pgd", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init gensim.models.nmf_pgd"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 
0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (memviewslice->memview || memviewslice->data) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) 
Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview || (PyObject *) memview == Py_None) return; if (__pyx_get_slice_count(memview) < 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (first_time) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview ) { return; } else if ((PyObject *) memview == Py_None) { memslice->memview = NULL; return; } if (__pyx_get_slice_count(memview) <= 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (last_time) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; 
tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } 
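/* At this point `value` is a normalized exception instance and any explicit
   cause has been attached; the code below installs it as the current
   exception and, if a traceback object was supplied, as the current
   traceback. */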
PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if 
(likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = 
__Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } length = stop - start; if (unlikely(length <= 0)) return PyUnicode_FromUnicode(NULL, 0); cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject 
*__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? r : __Pyx_GetAttr3Default(d); } /* PyDictVersioning */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { PyObject *dict = Py_TYPE(obj)->tp_dict; return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* GetTopmostException */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate) { _PyErr_StackItem *exc_info = tstate->exc_info; while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && exc_info->previous_item != NULL) { exc_info = exc_info->previous_item; } return exc_info; } #endif /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); *type = exc_info->exc_type; *value = exc_info->exc_value; *tb = exc_info->exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; 
tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? 
PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if 
(_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_cython); if (unlikely(!reduce_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto BAD; setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate_cython); if (unlikely(!setstate_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto BAD; } PyType_Modified((PyTypeObject*)type_obj); } } goto GOOD; BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = 
PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} view->obj = NULL; Py_DECREF(obj); } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * 
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t <= '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case '?': return "'bool'"; case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 
16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { 
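/* the format string ended while fields of the expected dtype remain unmatched: report the buffer dtype mismatch */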
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && 
!b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void *)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (spec & (__Pyx_MEMVIEW_PTR)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (buf->suboffsets) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (buf->suboffsets && buf->suboffsets[dim] >= 0) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (!buf->suboffsets || (buf->suboffsets[dim] < 0)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (buf->ndim != ndim) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto 
fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned) buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (!__pyx_check_strides(buf, i, ndim, spec)) goto fail; if (!__pyx_check_suboffsets(buf, i, ndim, spec)) goto fail; } if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS_RO | writable_flag, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = 
from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (from_mvs->suboffsets[i] >= 0) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned 
long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if 
(8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if 
(8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | 
(long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return 
PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * 
PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: 
PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x 
== Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
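The __Pyx_PyInt_From_int / __Pyx_PyInt_As_int helpers above fall back to _PyLong_FromByteArray / _PyLong_AsByteArray and decide the machine's byte order at run time by storing 1 in an int and inspecting its lowest-addressed byte. A minimal standalone sketch of that probe (the main() wrapper is illustrative and not part of the generated module):

#include <stdio.h>

int main(void)
{
    /* Store 1 in an int and look at its lowest-addressed byte:
       on a little-endian machine that byte is 1, on a big-endian
       machine it is 0.  This mirrors the `int one = 1;
       int little = (int)*(unsigned char *)&one;` idiom used by the
       byte-array fallback paths of the integer conversion helpers. */
    int one = 1;
    int is_little = (int) *(unsigned char *) &one;
    printf("this machine is %s-endian\n", is_little ? "little" : "big");
    return 0;
}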
queue.h
// -*- C++ -*- // Copyright (C) 2007-2021 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/queue.h * @brief Lock-free double-ended queue. * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Johannes Singler. #ifndef _GLIBCXX_PARALLEL_QUEUE_H #define _GLIBCXX_PARALLEL_QUEUE_H 1 #include <parallel/types.h> #include <parallel/base.h> #include <parallel/compatibility.h> /** @brief Decide whether to declare certain variable volatile in this file. */ #define _GLIBCXX_VOLATILE volatile namespace __gnu_parallel { /**@brief Double-ended queue of bounded size, allowing lock-free * atomic access. push_front() and pop_front() must not be called * concurrently to each other, while pop_back() can be called * concurrently at all times. * @c empty(), @c size(), and @c top() are intentionally not provided. * Calling them would not make sense in a concurrent setting. * @param _Tp Contained element type. */ template<typename _Tp> class _RestrictedBoundedConcurrentQueue { private: /** @brief Array of elements, seen as cyclic buffer. */ _Tp* _M_base; /** @brief Maximal number of elements contained at the same time. */ _SequenceIndex _M_max_size; /** @brief Cyclic __begin and __end pointers contained in one atomically changeable value. */ _GLIBCXX_VOLATILE _CASable _M_borders; public: /** @brief Constructor. Not to be called concurrent, of course. * @param __max_size Maximal number of elements to be contained. */ _RestrictedBoundedConcurrentQueue(_SequenceIndex __max_size) { _M_max_size = __max_size; _M_base = new _Tp[__max_size]; _M_borders = __encode2(0, 0); #pragma omp flush } /** @brief Destructor. Not to be called concurrent, of course. */ ~_RestrictedBoundedConcurrentQueue() { delete[] _M_base; } /** @brief Pushes one element into the queue at the front end. * Must not be called concurrently with pop_front(). */ void push_front(const _Tp& __t) { _CASable __former_borders = _M_borders; int __former_front, __former_back; __decode2(__former_borders, __former_front, __former_back); *(_M_base + __former_front % _M_max_size) = __t; #if _GLIBCXX_PARALLEL_ASSERTIONS // Otherwise: front - back > _M_max_size eventually. _GLIBCXX_PARALLEL_ASSERT(((__former_front + 1) - __former_back) <= _M_max_size); #endif __fetch_and_add(&_M_borders, __encode2(1, 0)); } /** @brief Pops one element from the queue at the front end. * Must not be called concurrently with pop_front(). 
*/ bool pop_front(_Tp& __t) { int __former_front, __former_back; #pragma omp flush __decode2(_M_borders, __former_front, __former_back); while (__former_front > __former_back) { // Chance. _CASable __former_borders = __encode2(__former_front, __former_back); _CASable __new_borders = __encode2(__former_front - 1, __former_back); if (__compare_and_swap(&_M_borders, __former_borders, __new_borders)) { __t = *(_M_base + (__former_front - 1) % _M_max_size); return true; } #pragma omp flush __decode2(_M_borders, __former_front, __former_back); } return false; } /** @brief Pops one element from the queue at the front end. * Must not be called concurrently with pop_front(). */ bool pop_back(_Tp& __t) //queue behavior { int __former_front, __former_back; #pragma omp flush __decode2(_M_borders, __former_front, __former_back); while (__former_front > __former_back) { // Chance. _CASable __former_borders = __encode2(__former_front, __former_back); _CASable __new_borders = __encode2(__former_front, __former_back + 1); if (__compare_and_swap(&_M_borders, __former_borders, __new_borders)) { __t = *(_M_base + __former_back % _M_max_size); return true; } #pragma omp flush __decode2(_M_borders, __former_front, __former_back); } return false; } }; } //namespace __gnu_parallel #undef _GLIBCXX_VOLATILE #endif /* _GLIBCXX_PARALLEL_QUEUE_H */
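push_front() above publishes the element before advancing the front border with a fetch-and-add, and both pop paths retry a compare-and-swap on the packed (front, back) word until the borders they read are still current. The sketch below shows the same pack-two-borders-into-one-CAS-word pattern with C11 atomics; the 32/32-bit split and the encode2/decode2 helper names are illustrative assumptions, not the actual __encode2/__decode2 layout used by libstdc++.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helpers: pack a (front, back) pair into one 64-bit word
   so that both borders can be read and updated by a single CAS. */
static uint64_t encode2(uint32_t front, uint32_t back)
{ return ((uint64_t) front << 32) | back; }

static void decode2(uint64_t w, uint32_t *front, uint32_t *back)
{ *front = (uint32_t)(w >> 32); *back = (uint32_t) w; }

int main(void)
{
    _Atomic uint64_t borders = 0;            /* front = 0, back = 0 */

    /* "push_front": advance only the front border (compare
       __fetch_and_add(&_M_borders, __encode2(1, 0)) above). */
    atomic_fetch_add(&borders, encode2(1, 0));

    /* "pop_back": retry a CAS that advances only the back border,
       just as pop_back() above loops until the borders it read
       are still the current ones. */
    uint64_t old = atomic_load(&borders);
    uint32_t front, back;
    do {
        decode2(old, &front, &back);
        if (front <= back) { puts("queue empty"); return 0; }
    } while (!atomic_compare_exchange_weak(&borders, &old,
                                           encode2(front, back + 1)));
    printf("claimed element at back index %u\n", back);
    return 0;
}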
tree.h
#ifndef LIGHTGBM_TREE_H_ #define LIGHTGBM_TREE_H_ #include <LightGBM/meta.h> #include <LightGBM/dataset.h> #include <string> #include <vector> #include <memory> namespace LightGBM { #define kMaxTreeOutput (100) /*! * \brief Tree model */ class Tree { public: /*! * \brief Constructor * \param max_leaves The number of max leaves */ explicit Tree(int max_leaves); /*! * \brief Construtor, from a string * \param str Model string */ explicit Tree(const std::string& str); ~Tree(); /*! * \brief Performing a split on tree leaves. * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param bin_type type of this feature, numerical or categorical * \param threshold Threshold(bin) of split * \param real_feature Index of feature, the original index on data * \param threshold_double Threshold on feature value * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param gain Split gain * \param zero_bin bin value for value==0 (missing value) * \param default_bin default conversion for the missing value, in bin * \param default_value default conversion for the missing value, in float value * \return The index of new leaf. */ int Split(int leaf, int feature, BinType bin_type, uint32_t threshold, int real_feature, double threshold_double, double left_value, double right_value, data_size_t left_cnt, data_size_t right_cnt, double gain, uint32_t zero_bin, uint32_t default_bin_for_zero, double default_value); /*! \brief Get the output of one leaf */ inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; } /*! \brief Set the output of one leaf */ inline void SetLeafOutput(int leaf, double output) { leaf_value_[leaf] = output; } /*! * \brief Adding prediction value of this tree model to scores * \param data The dataset * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, data_size_t num_data, double* score) const; /*! * \brief Adding prediction value of this tree model to scorese * \param data The dataset * \param used_data_indices Indices of used data * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, const data_size_t* used_data_indices, data_size_t num_data, double* score) const; void AddPredictionToScoreGetFeatures(const Dataset* data, data_size_t num_data, double* score, std::vector<bool> &features_used, data_size_t num_data_fused) const; void AddPredictionToScoreGetFeatures(const Dataset* data, const data_size_t* used_data_indices, data_size_t num_data, double* score, std::vector<bool> &features_usedd, data_size_t num_data_fused) const; /*! * \brief Prediction on one record * \param feature_values Feature value of this record * \return Prediction result */ inline double Predict(const double* feature_values) const; inline int PredictLeafIndex(const double* feature_values) const; /*! \brief Get Number of leaves*/ inline int num_leaves() const { return num_leaves_; } /*! \brief Get depth of specific leaf*/ inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; } /*! \brief Get feature of specific split*/ inline int split_feature(int split_idx) const { return split_feature_[split_idx]; } inline double split_gain(int split_idx) const { return split_gain_[split_idx]; } /*! 
* \brief Shrinkage for the tree's output * shrinkage rate (a.k.a learning rate) is used to tune the traning process * \param rate The factor of shrinkage */ inline void Shrinkage(double rate) { #pragma omp parallel for schedule(static, 512) if (num_leaves_ >= 1024) for (int i = 0; i < num_leaves_; ++i) { leaf_value_[i] *= rate; if (leaf_value_[i] > kMaxTreeOutput) { leaf_value_[i] = kMaxTreeOutput; } else if (leaf_value_[i] < -kMaxTreeOutput) { leaf_value_[i] = -kMaxTreeOutput; } } shrinkage_ *= rate; } /*! \brief Serialize this object to string*/ std::string ToString(); /*! \brief Serialize this object to json*/ std::string ToJSON(); /*! \brief Serialize this object to if-else statement*/ std::string ToIfElse(int index, bool is_predict_leaf_index); template<typename T> static bool CategoricalDecision(T fval, T threshold) { if (static_cast<int>(fval) == static_cast<int>(threshold)) { return true; } else { return false; } } template<typename T> static bool NumericalDecision(T fval, T threshold) { if (fval <= threshold) { return true; } else { return false; } } static double DefaultValueForZero(double fval, double zero, double out) { if (fval > -zero && fval <= zero) { return out; } else { return fval; } } static uint32_t DefaultValueForZero(uint32_t fval, uint32_t zero, uint32_t out) { if (fval == zero) { return out; } else { return fval; } } static const char* GetDecisionTypeName(int8_t type) { if (type == 0) { return "no_greater"; } else { return "is"; } } std::vector<std::vector<int>> GetPathToLeafs() const; static std::vector<bool(*)(uint32_t, uint32_t)> inner_decision_funs; static std::vector<bool(*)(double, double)> decision_funs; private: /*! * \brief Find leaf index of which record belongs by features * \param feature_values Feature value of this record * \return Leaf index */ inline int GetLeaf(const double* feature_values) const; /*! \brief Serialize one node to json*/ inline std::string NodeToJSON(int index); /*! \brief Serialize one node to if-else statement*/ inline std::string NodeToIfElse(int index, bool is_predict_leaf_index); /*! \brief Number of max leaves*/ int max_leaves_; /*! \brief Number of current levas*/ int num_leaves_; // following values used for non-leaf node /*! \brief A non-leaf node's left child */ std::vector<int> left_child_; /*! \brief A non-leaf node's right child */ std::vector<int> right_child_; /*! \brief A non-leaf node's split feature */ std::vector<int> split_feature_inner_; /*! \brief A non-leaf node's split feature, the original index */ std::vector<int> split_feature_; /*! \brief A non-leaf node's split threshold in bin */ std::vector<uint32_t> threshold_in_bin_; /*! \brief A non-leaf node's split threshold in feature value */ std::vector<double> threshold_; /*! \brief Decision type, 0 for '<='(numerical feature), 1 for 'is'(categorical feature) */ std::vector<int8_t> decision_type_; /*! \brief Default values for the na/0 feature values */ std::vector<double> default_value_; std::vector<uint32_t> zero_bin_; std::vector<uint32_t> default_bin_for_zero_; /*! \brief A non-leaf node's split gain */ std::vector<double> split_gain_; // used for leaf node /*! \brief The parent of leaf */ std::vector<int> leaf_parent_; /*! \brief Output of leaves */ std::vector<double> leaf_value_; /*! \brief DataCount of leaves */ std::vector<data_size_t> leaf_count_; /*! \brief Output of non-leaf nodes */ std::vector<double> internal_value_; /*! \brief DataCount of non-leaf nodes */ std::vector<data_size_t> internal_count_; /*! 
\brief Depth for leaves */ std::vector<int> leaf_depth_; double shrinkage_; bool has_categorical_; }; inline double Tree::Predict(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return LeafOutput(leaf); } else { return 0.0f; } } inline int Tree::PredictLeafIndex(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return leaf; } else { return 0; } } inline int Tree::GetLeaf(const double* feature_values) const { int node = 0; if (has_categorical_) { while (node >= 0) { double fval = DefaultValueForZero(feature_values[split_feature_[node]], kMissingValueRange, default_value_[node]); if (decision_funs[decision_type_[node]]( fval, threshold_[node])) { node = left_child_[node]; } else { node = right_child_[node]; } } } else { while (node >= 0) { double fval = DefaultValueForZero(feature_values[split_feature_[node]], kMissingValueRange, default_value_[node]); if (NumericalDecision<double>( fval, threshold_[node])) { node = left_child_[node]; } else { node = right_child_[node]; } } } return ~node; } } // namespace LightGBM #endif // LightGBM_TREE_H_
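Tree::GetLeaf() above walks left_child_/right_child_ until the child index turns negative: leaf indices are stored as their bitwise complement, which is why the traversal ends with return ~node;. A minimal standalone sketch of that encoding on a hard-coded two-split tree (the arrays and thresholds are hypothetical, not LightGBM's model layout):

#include <stdio.h>

/* A tiny tree with 2 internal nodes and 3 leaves.
 * Internal nodes are indexed 0 and 1; a child value of ~k (negative)
 * means "leaf k", mirroring the `return ~node;` in Tree::GetLeaf. */
static const int    left_child[]    = { 1, ~0 };   /* node 0 -> node 1, node 1 -> leaf 0 */
static const int    right_child[]   = { ~2, ~1 };  /* node 0 -> leaf 2, node 1 -> leaf 1 */
static const int    split_feature[] = { 0, 1 };
static const double threshold[]     = { 0.5, 3.0 };

static int get_leaf(const double *feature_values)
{
    int node = 0;
    while (node >= 0) {                       /* negative index == leaf reached */
        double fval = feature_values[split_feature[node]];
        node = (fval <= threshold[node]) ? left_child[node] : right_child[node];
    }
    return ~node;                             /* undo the complement encoding */
}

int main(void)
{
    double x[2] = { 0.2, 5.0 };               /* feature0 <= 0.5, feature1 > 3.0 */
    printf("record falls into leaf %d\n", get_leaf(x));   /* prints leaf 1 */
    return 0;
}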
hypre_merge_sort.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_utilities.h" #include "hypre_hopscotch_hash.h" #include "../seq_mv/HYPRE_seq_mv.h" //#define DBG_MERGE_SORT #ifdef DBG_MERGE_SORT #include <assert.h> #include <algorithm> #include <unordered_map> #endif #define SWAP(T, a, b) do { T tmp = a; a = b; b = tmp; } while (0) /* union of two sorted (in ascending order) array arr1 and arr2 into arr3 * Assumption: no duplicates in arr1 and arr2 * arr3 should have enough space on entry * map1 and map2 map arr1 and arr2 to arr3 */ void hypre_union2(HYPRE_Int n1, HYPRE_BigInt *arr1, HYPRE_Int n2, HYPRE_BigInt *arr2, HYPRE_Int *n3, HYPRE_BigInt *arr3, HYPRE_Int *map1, HYPRE_Int *map2) { HYPRE_Int i = 0, j = 0, k = 0; while (i < n1 && j < n2) { if (arr1[i] < arr2[j]) { if (map1) { map1[i] = k; } arr3[k++] = arr1[i++]; } else if (arr1[i] > arr2[j]) { if (map2) { map2[j] = k; } arr3[k++] = arr2[j++]; } else /* == */ { if (map1) { map1[i] = k; } if (map2) { map2[j] = k; } arr3[k++] = arr1[i++]; j++; } } while (i < n1) { if (map1) { map1[i] = k; } arr3[k++] = arr1[i++]; } while (j < n2) { if (map2) { map2[j] = k; } arr3[k++] = arr2[j++]; } *n3 = k; } static void hypre_merge(HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2, HYPRE_Int *out) { for ( ; first1 != last1; ++out) { if (first2 == last2) { for ( ; first1 != last1; ++first1, ++out) { *out = *first1; } return; } if (*first2 < *first1) { *out = *first2; ++first2; } else { *out = *first1; ++first1; } } for ( ; first2 != last2; ++first2, ++out) { *out = *first2; } } #ifdef HYPRE_CONCURRENT_HOPSCOTCH static void hypre_big_merge(HYPRE_BigInt *first1, HYPRE_BigInt *last1, HYPRE_BigInt *first2, HYPRE_BigInt *last2, HYPRE_BigInt *out) { for ( ; first1 != last1; ++out) { if (first2 == last2) { for ( ; first1 != last1; ++first1, ++out) { *out = *first1; } return; } if (*first2 < *first1) { *out = *first2; ++first2; } else { *out = *first1; ++first1; } } for ( ; first2 != last2; ++first2, ++out) { *out = *first2; } } #endif static void kth_element_( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_Int *a1, HYPRE_Int *a2, HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { while (1) { HYPRE_Int i = (left + right)/2; // right < k -> i < k HYPRE_Int j = k - i - 1; #ifdef DBG_MERGE_SORT assert(left <= right && right <= k); assert(i < k); // i == k implies left == right == k that can never happen assert(j >= 0 && j < n2); #endif if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1])) { *out1 = i; *out2 = j + 1; return; } else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1])) { *out1 = i + 1; *out2 = j; return; } else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1]) { // search in left half of a1 right = i - 1; } else { // search in right half of a1 left = i + 1; } } } /** * Partition the input so that * a1[0:*out1) and a2[0:*out2) contain the smallest k elements */ static void kth_element( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_Int *a1, HYPRE_Int *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { // either of the inputs is empty if (n1 == 0) { *out1 = 0; *out2 = k; return; } if (n2 == 0) { *out1 = k; *out2 = 0; return; } if (k >= n1 + n2) { *out1 = n1; *out2 = n2; return; } // 
one is greater than the other if (k < n1 && a1[k] <= a2[0]) { *out1 = k; *out2 = 0; return; } if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1]) { *out1 = n1; *out2 = k - n1; return; } if (k < n2 && a2[k] <= a1[0]) { *out1 = 0; *out2 = k; return; } if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1]) { *out1 = k - n2; *out2 = n2; return; } // now k > 0 // faster to do binary search on the shorter sequence if (n1 > n2) { SWAP(HYPRE_Int, n1, n2); SWAP(HYPRE_Int *, a1, a2); SWAP(HYPRE_Int *, out1, out2); } if (k < (n1 + n2)/2) { kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k); } else { // when k is big, faster to find (n1 + n2 - k)th biggest element HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0); HYPRE_Int new_k = k - offset1 - offset2; HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1); HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1); kth_element_(out1, out2, a1 + offset1, a2 + offset2, 0, new_n1 - 1, new_n1, new_n2, new_k); *out1 += offset1; *out2 += offset2; } #ifdef DBG_MERGE_SORT assert(*out1 + *out2 == k); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH static void big_kth_element_( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_BigInt *a1, HYPRE_BigInt *a2, HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { while (1) { HYPRE_Int i = (left + right)/2; // right < k -> i < k HYPRE_Int j = k - i - 1; #ifdef DBG_MERGE_SORT assert(left <= right && right <= k); assert(i < k); // i == k implies left == right == k that can never happen assert(j >= 0 && j < n2); #endif if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1])) { *out1 = i; *out2 = j + 1; return; } else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1])) { *out1 = i + 1; *out2 = j; return; } else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1]) { // search in left half of a1 right = i - 1; } else { // search in right half of a1 left = i + 1; } } } /** * Partition the input so that * a1[0:*out1) and a2[0:*out2) contain the smallest k elements */ static void big_kth_element( HYPRE_Int *out1, HYPRE_Int *out2, HYPRE_BigInt *a1, HYPRE_BigInt *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k) { // either of the inputs is empty if (n1 == 0) { *out1 = 0; *out2 = k; return; } if (n2 == 0) { *out1 = k; *out2 = 0; return; } if (k >= n1 + n2) { *out1 = n1; *out2 = n2; return; } // one is greater than the other if (k < n1 && a1[k] <= a2[0]) { *out1 = k; *out2 = 0; return; } if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1]) { *out1 = n1; *out2 = k - n1; return; } if (k < n2 && a2[k] <= a1[0]) { *out1 = 0; *out2 = k; return; } if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1]) { *out1 = k - n2; *out2 = n2; return; } // now k > 0 // faster to do binary search on the shorter sequence if (n1 > n2) { SWAP(HYPRE_Int, n1, n2); SWAP(HYPRE_BigInt *, a1, a2); SWAP(HYPRE_Int *, out1, out2); } if (k < (n1 + n2)/2) { big_kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k); } else { // when k is big, faster to find (n1 + n2 - k)th biggest element HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0); HYPRE_Int new_k = k - offset1 - offset2; HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1); HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1); big_kth_element_(out1, out2, a1 + (HYPRE_BigInt)offset1, a2 + (HYPRE_BigInt)offset2, 0, new_n1 - 1, new_n1, new_n2, new_k); *out1 += offset1; *out2 += offset2; } #ifdef DBG_MERGE_SORT assert(*out1 + *out2 == k); #endif } #endif /** * @param num_threads number of threads that participate in this merge * 
@param my_thread_num thread id (zeor-based) among the threads that participate in this merge */ static void hypre_parallel_merge( HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2, HYPRE_Int *out, HYPRE_Int num_threads, HYPRE_Int my_thread_num) { HYPRE_Int n1 = last1 - first1; HYPRE_Int n2 = last2 - first2; HYPRE_Int n = n1 + n2; HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads; HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n); HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n); #ifdef DBG_MERGE_SORT assert(std::is_sorted(first1, last1)); assert(std::is_sorted(first2, last2)); #endif HYPRE_Int begin1, begin2, end1, end2; kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank); kth_element(&end1, &end2, first1, first2, n1, n2, end_rank); while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif begin1--; begin2++; } while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif end1--; end2++; } #ifdef DBG_MERGE_SORT assert(begin1 <= end1); assert(begin2 <= end2); #endif hypre_merge( first1 + begin1, first1 + end1, first2 + begin2, first2 + end2, out + begin1 + begin2); #ifdef DBG_MERGE_SORT assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2)); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH /** * @param num_threads number of threads that participate in this merge * @param my_thread_num thread id (zeor-based) among the threads that participate in this merge */ static void hypre_big_parallel_merge( HYPRE_BigInt *first1, HYPRE_BigInt *last1, HYPRE_BigInt *first2, HYPRE_BigInt *last2, HYPRE_BigInt *out, HYPRE_Int num_threads, HYPRE_Int my_thread_num) { HYPRE_Int n1 = (HYPRE_Int)(last1 - first1); HYPRE_Int n2 = (HYPRE_Int)(last2 - first2); HYPRE_Int n = n1 + n2; HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads; HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n); HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n); #ifdef DBG_MERGE_SORT assert(std::is_sorted(first1, last1)); assert(std::is_sorted(first2, last2)); #endif HYPRE_Int begin1, begin2, end1, end2; big_kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank); big_kth_element(&end1, &end2, first1, first2, n1, n2, end_rank); while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif begin1--; begin2++; } while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2]) { #ifdef DBG_MERGE_SORT printf("%s:%d\n", __FILE__, __LINE__); #endif end1--; end2++; } #ifdef DBG_MERGE_SORT assert(begin1 <= end1); assert(begin2 <= end2); #endif hypre_big_merge( first1 + (HYPRE_BigInt)begin1, first1 + (HYPRE_BigInt)end1, first2 + (HYPRE_BigInt)begin2, first2 + (HYPRE_BigInt)end2, out + (HYPRE_BigInt)(begin1 + begin2)); #ifdef DBG_MERGE_SORT assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2)); #endif } #endif void hypre_merge_sort(HYPRE_Int *in, HYPRE_Int *temp, HYPRE_Int len, HYPRE_Int **out) { if (0 == len) return; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif #ifdef DBG_MERGE_SORT HYPRE_Int *dbg_buf = new HYPRE_Int[len]; std::copy(in, in + len, dbg_buf); std::sort(dbg_buf, dbg_buf + len); #endif // HYPRE_Int thread_private_len[hypre_NumThreads()]; // HYPRE_Int out_len = 0; 
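   /*
    * Note (added for clarity): each thread first quicksorts its own contiguous
    * chunk of in[]; the sorted chunks are then combined in ceil(log2(num_threads))
    * rounds of pairwise group merges via hypre_parallel_merge(), ping-ponging
    * between in[] and temp[].  Depending on the number of rounds, the final
    * sorted data ends up in either buffer, which is why the result is handed
    * back through *out rather than being guaranteed to live in in[].
    */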
#ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int num_threads = hypre_NumActiveThreads(); HYPRE_Int my_thread_num = hypre_GetThreadNum(); // thread-private sort HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads; HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len); HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len); hypre_qsort0(in, i_begin, i_end - 1); // merge sorted sequences HYPRE_Int in_group_size; HYPRE_Int *in_buf = in; HYPRE_Int *out_buf = temp; for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2) { #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif // merge 2 in-groups into 1 out-group HYPRE_Int out_group_size = in_group_size*2; HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size; // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1); HYPRE_Int id_in_group = my_thread_num%out_group_size; HYPRE_Int num_threads_in_group = hypre_min(group_leader + out_group_size, num_threads) - group_leader; HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len); HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len); hypre_parallel_merge( in_buf + in_group1_begin, in_buf + in_group1_end, in_buf + in_group2_begin, in_buf + in_group2_end, out_buf + in_group1_begin, num_threads_in_group, id_in_group); HYPRE_Int *temp = in_buf; in_buf = out_buf; out_buf = temp; } *out = in_buf; } /* omp parallel */ #ifdef DBG_MERGE_SORT assert(std::equal(*out, *out + len, dbg_buf)); delete[] dbg_buf; #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH void hypre_sort_and_create_inverse_map( HYPRE_Int *in, HYPRE_Int len, HYPRE_Int **out, hypre_UnorderedIntMap *inverse_map) { if (len == 0) { return; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif HYPRE_Int *temp = hypre_TAlloc(HYPRE_Int, len, HYPRE_MEMORY_HOST); hypre_merge_sort(in, temp, len, out); hypre_UnorderedIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads()); HYPRE_Int i; #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = 0; i < len; i++) { HYPRE_Int old = hypre_UnorderedIntMapPutIfAbsent(inverse_map, (*out)[i], i); assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY); #ifdef DBG_MERGE_SORT if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); assert(false); } #endif } #ifdef DBG_MERGE_SORT std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len); for (HYPRE_Int i = 0; i < len; ++i) { inverse_map2[(*out)[i]] = i; if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); assert(false); } } assert(hypre_UnorderedIntMapSize(inverse_map) == len); #endif if (*out == in) { hypre_TFree(temp, HYPRE_MEMORY_HOST); } else { hypre_TFree(in, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } #ifdef HYPRE_CONCURRENT_HOPSCOTCH void hypre_big_merge_sort(HYPRE_BigInt *in, HYPRE_BigInt *temp, HYPRE_Int len, HYPRE_BigInt **out) { if (0 == len) return; #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif #ifdef DBG_MERGE_SORT HYPRE_Int *dbg_buf = new HYPRE_Int[len]; std::copy(in, in + len, dbg_buf); std::sort(dbg_buf, dbg_buf + 
len); #endif // HYPRE_Int thread_private_len[hypre_NumThreads()]; // HYPRE_Int out_len = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel #endif { HYPRE_Int num_threads = hypre_NumActiveThreads(); HYPRE_Int my_thread_num = hypre_GetThreadNum(); // thread-private sort HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads; HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len); HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len); hypre_BigQsort0(in, i_begin, i_end - 1); // merge sorted sequences HYPRE_Int in_group_size; HYPRE_BigInt *in_buf = in; HYPRE_BigInt *out_buf = temp; for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2) { #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif // merge 2 in-groups into 1 out-group HYPRE_Int out_group_size = in_group_size*2; HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size; // HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1); HYPRE_Int id_in_group = my_thread_num%out_group_size; HYPRE_Int num_threads_in_group = hypre_min(group_leader + out_group_size, num_threads) - group_leader; HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len); HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len); HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len); hypre_big_parallel_merge( in_buf + (HYPRE_BigInt)in_group1_begin, in_buf + (HYPRE_BigInt)in_group1_end, in_buf + (HYPRE_BigInt)in_group2_begin, in_buf + (HYPRE_BigInt)in_group2_end, out_buf + (HYPRE_BigInt)in_group1_begin, num_threads_in_group, id_in_group); HYPRE_BigInt *temp = in_buf; in_buf = out_buf; out_buf = temp; } *out = in_buf; } /* omp parallel */ #ifdef DBG_MERGE_SORT assert(std::equal(*out, *out + len, dbg_buf)); delete[] dbg_buf; #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } void hypre_big_sort_and_create_inverse_map( HYPRE_BigInt *in, HYPRE_Int len, HYPRE_BigInt **out, hypre_UnorderedBigIntMap *inverse_map) { if (len == 0) { return; } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime(); #endif HYPRE_BigInt *temp = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST); hypre_big_merge_sort(in, temp, len, out); hypre_UnorderedBigIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads()); HYPRE_Int i; #pragma omp parallel for HYPRE_SMP_SCHEDULE for (i = 0; i < len; i++) { HYPRE_Int old = hypre_UnorderedBigIntMapPutIfAbsent(inverse_map, (*out)[i], i); assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY); #ifdef DBG_MERGE_SORT if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); assert(false); } #endif } #ifdef DBG_MERGE_SORT std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len); for (HYPRE_Int i = 0; i < len; ++i) { inverse_map2[(*out)[i]] = i; if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i) { fprintf(stderr, "%d %d\n", i, (*out)[i]); assert(false); } } assert(hypre_UnorderedBigIntMapSize(inverse_map) == len); #endif if (*out == in) { hypre_TFree(temp, HYPRE_MEMORY_HOST); } else { hypre_TFree(in, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime(); #endif } #endif #endif /* vim: set tabstop=8 softtabstop=3 sw=3 expandtab: */
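/*
 * Usage sketch (added; illustrative only, not part of hypre): how
 * hypre_merge_sort() above is typically driven.  The caller supplies the
 * input array plus a scratch buffer of equal length; on return, *out points
 * at whichever of the two buffers holds the sorted data.  example_sort,
 * keys and len are hypothetical names.
 */
#if 0
static void example_sort(HYPRE_Int *keys, HYPRE_Int len)
{
   HYPRE_Int *temp = hypre_TAlloc(HYPRE_Int, len, HYPRE_MEMORY_HOST);
   HYPRE_Int *sorted;

   hypre_merge_sort(keys, temp, len, &sorted);  /* sorted aliases keys or temp */
   /* ... use sorted[0..len-1]; when done, free whichever buffer is not
    *     sorted first, as hypre_sort_and_create_inverse_map() above does. */
   hypre_TFree(sorted == keys ? temp : keys, HYPRE_MEMORY_HOST);
}
#endif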
model.h
#pragma once #include <util/common/geom/point.h> #include <util/common/math/vec.h> #include <util/common/plot/plot.h> #include <util/common/math/fuzzy.h> #include <vector> #include <map> #include <array> #include <omp.h> namespace model { /*****************************************************/ /* params */ /*****************************************************/ namespace consts { static const double eps = 2.17; static const double alpha = 2.59; static const double rho0 = 1.12; static const double rho1 = 1.8; static const double lambda = 21.0; static const double gamma = 1.20; static const double A = 7.05; static const double B = 0.602; static const double m = 28.086; static math::v3<> r[2][4] = { { math::v3<>{ 1, -1, -1 } * alpha / 2 / 2, math::v3<>{ -1, -1, 1 } * alpha / 2 / 2, math::v3<>{ -1, 1, -1 } * alpha / 2 / 2, math::v3<>{ 1, 1, 1 } * alpha / 2 / 2 }, { math::v3<>{ 1, 1, -1 } * alpha / 2 / 2, math::v3<>{ -1, 1, 1 } * alpha / 2 / 2, math::v3<>{ 1, -1, 1 } * alpha / 2 / 2, math::v3<>{ -1, -1, -1 } * alpha / 2 / 2 } }; }; struct parameters { // system params size_t n; // other params double dt, dx, eps; bool freebc, wipeke, hardwipe; }; inline parameters make_default_parameters() { parameters p = { // system params 10, // other params 1e-1, consts::rho0 / 100, 1e-9, false, true, false }; return p; } /*****************************************************/ /* data */ /*****************************************************/ struct particle { math::v3<> x, v0, v; std::vector < particle * > neighbors; bool outer = false; }; using particle_fuzzy_t = math::fuzzy < math::fuzzy_weak_double_traits > ; inline bool sameloc(particle & p1, particle & p2) { return particle_fuzzy_t::eq(p1.x.x, p2.x.x) && particle_fuzzy_t::eq(p1.x.y, p2.x.y) && particle_fuzzy_t::eq(p1.x.z, p2.x.z); } class particles { public: std::vector < particle > all; std::vector < size_t > outer; std::vector < std::pair < size_t, size_t > > edges; double radius; bool skip_0 = false; public: void init(const parameters & p) { all.clear(); outer.clear(); edges.clear(); std::vector < std::pair < particle, size_t > > prev_layer; std::vector < std::pair < particle, size_t > > cur_layer; std::vector < std::pair < particle, size_t > > next_layer; std::vector < std::pair < size_t, size_t > > cur_edges; std::vector < std::pair < size_t, size_t > > next_edges; cur_layer.emplace_back(); radius = 0; for (size_t n = 0; n < p.n; ++n) { for (size_t k = 0; k < cur_layer.size(); ++k) { auto & pt = cur_layer[k]; radius = std::fmax(radius, math::sqnorm(pt.first.x)); for (size_t i = 0; i < 4; ++i) { particle pt0 = pt.first; pt0.x = pt0.x + consts::r[n & 1][i]; bool skip = false; #pragma omp parallel for reduction(|:skip) for (int j = 0; j < prev_layer.size(); ++j) if (sameloc(pt0, prev_layer[j].first)) { skip |= true; } #pragma omp parallel for reduction(|:skip) for (int j = 0; j < next_layer.size(); ++j) { if (sameloc(pt0, next_layer[j].first)) { skip |= true; next_edges.emplace_back(all.size(), j); } } if (!skip) next_layer.emplace_back(pt0, all.size()); } all.push_back(pt.first); if (n + 1 == p.n) { all.back().outer = true; outer.push_back(all.size() - 1); } edges.emplace_back(all.size() - 1, pt.second); } for (size_t i = 0; i < cur_edges.size(); ++i) { edges.emplace_back(cur_edges[i].first, all.size() - cur_layer.size() + cur_edges[i].second); } cur_edges.swap(next_edges); next_edges.clear(); prev_layer.swap(cur_layer); cur_layer.swap(next_layer); next_layer.clear(); } double thres2 = 1.5 * consts::rho1; thres2 *= thres2; #pragma omp parallel for 
for (int i = 0; i < all.size(); ++i) for (int j = 0; j < i; ++j) { if (i == j) continue; if (math::sqnorm(all[i].x - all[j].x) < thres2) { #pragma omp critical { all[i].neighbors.push_back(&all[j]); all[j].neighbors.push_back(&all[i]); } } } radius = std::sqrt(radius) + consts::rho1; } double stillinger_weber_potential( size_t a, math::v3<> ax) { double e = 0; auto & ap = all[a]; #pragma omp parallel for reduction(+:e) for (int i = 0; i < ap.neighbors.size(); ++i) { auto & ip = *ap.neighbors[i]; if (skip_0 && (&all[0] == &ip)) continue; auto v_ai = ip.x - ax; double r_ai = math::norm(v_ai); double e0 = 0; e0 += f_ij(r_ai) / 2; for (size_t j = 0; j < i; ++j) { auto & jp = *ap.neighbors[j]; auto v_ij = jp.x - ip.x; auto v_ja = ax - jp.x; double r_aj = math::norm(v_ja); double r_ij = math::norm(v_ij); double s_iaj = - (v_ai * v_ja); double s_aij = - (v_ai * v_ij); double s_ija = - (v_ij * v_ja); e0 += (h_ijk(r_ai, r_aj, s_iaj / r_ai / r_aj) + h_ijk(r_ai, r_ij, s_aij / r_ai / r_ij) + h_ijk(r_ij, r_aj, s_ija / r_ij / r_aj)) / 3; } e += e0; } e *= consts::eps; return e; } double penergy() { double e = 0; for (size_t i = skip_0 ? 1 : 0; i < all.size(); ++i) { e += stillinger_weber_potential(i, all[i].x); } return e; } double kenergy() { double e = 0; for (size_t i = skip_0 ? 1 : 0; i < all.size(); ++i) { e += math::sqnorm(all[i].v); } return consts::m * e / 2; } double f_ij(double r_ij) { if (r_ij > consts::rho1) return 0; return consts::A * (consts::B / r_ij / r_ij / r_ij / r_ij - 1) * std::exp(1 / (r_ij - consts::rho1)); } double h_ijk(double r_ij, double r_ik, double cos_jik) { if (r_ij > consts::rho1) return 0; if (r_ik > consts::rho1) return 0; return consts::lambda * std::exp(consts::gamma / (r_ij - consts::rho1) + consts::gamma / (r_ik - consts::rho1)) * (cos_jik + 1 / 3.) * (cos_jik + 1 / 3.); } void next(const parameters & p) { for (size_t i = skip_0 ? 1 : 0; i < all.size(); ++i) { auto & a = all[i]; if (!p.freebc & a.outer) continue; auto f0 = get_f(p, i); a.x = a.x + a.v * p.dt + f0 / 2 / consts::m * p.dt * p.dt; auto f1 = get_f(p, i); a.v = a.v + (f0 + f1) / 2 / consts::m * p.dt; if (p.wipeke && (math::sqnorm(a.v) < math::sqnorm(a.v0))) { if (p.hardwipe) { #pragma omp parallel for for (int j = skip_0 ? 
1 : 0; j < all.size(); ++j) all[j].v = {}; } else { a.v = {}; } } a.v0 = a.v; } } math::v3<> get_f(const parameters & p, size_t i) { auto & a = all[i]; auto U = [&] (const math::v3<> & d) { return stillinger_weber_potential(i, a.x + d); }; return { - (U({ p.dx, 0, 0 }) - U({ -p.dx, 0, 0 })) / 2 / p.dx, - (U({ 0, p.dx, 0 }) - U({ 0, -p.dx, 0 })) / 2 / p.dx, - (U({ 0, 0, p.dx }) - U({ 0, 0, -p.dx })) / 2 / p.dx, }; } }; /*****************************************************/ /* drawing */ /*****************************************************/ using points_t = std::vector < geom::point2d_t > ; struct plot_data { util::ptr_t < points_t > data; plot::list_drawable < points_t > :: ptr_t plot; }; struct plot_config { plot::world_t::ptr_t world; plot::auto_viewport < points_t > :: ptr_t autoworld; }; struct model_data { util::ptr_t < parameters > params; plot_config config; plot_data penergy_data; plot_data kenergy_data; plot_data senergy_data; plot_data denergy_data; particles system_data; }; inline static plot_data make_plot_data ( plot::palette::pen_ptr pen = plot::palette::pen(0xffffff), plot::list_data_format data_format = plot::list_data_format::chain ) { plot_data pd; pd.data = util::create < points_t > (); pd.plot = plot::list_drawable < points_t > :: create ( plot::make_data_source(pd.data), nullptr, // no point painter pen ); pd.plot->data_format = data_format; return pd; } inline static plot::drawable::ptr_t make_root_drawable ( const plot_config & p, std::vector < plot::drawable::ptr_t > layers ) { using namespace plot; return viewporter::create( tick_drawable::create( layer_drawable::create(layers), const_n_tick_factory<axe::x>::create( make_simple_tick_formatter(6, 8), 0, 5 ), const_n_tick_factory<axe::y>::create( make_simple_tick_formatter(6, 8), 0, 5 ), palette::pen(RGB(80, 80, 80)), RGB(200, 200, 200) ), make_viewport_mapper(make_world_mapper < points_t > (p.autoworld)) ); } inline plot_config make_plot_config() { plot_config cfg; cfg.world = plot::world_t::create(); cfg.autoworld = plot::min_max_auto_viewport < points_t > :: create(); return cfg; } inline model_data make_model_data(const parameters & p = make_default_parameters()) { model_data md; md.config = make_plot_config(); md.params = util::create < parameters > (p); md.kenergy_data = make_plot_data(plot::palette::pen(0x0000ff, 2)); md.penergy_data = make_plot_data(plot::palette::pen(0x0000ff, 2)); md.senergy_data = make_plot_data(plot::palette::pen(0x0000ff, 2)); md.denergy_data = make_plot_data(plot::palette::pen(0x0000ff, 2)); return md; } }
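/*
 * Usage sketch (added; illustrative only).  Everything referenced below is
 * declared in this header; example_run, the step count and the use of the
 * energies are arbitrary.  particles::next() above is a velocity-Verlet step,
 *
 *     x(t+dt) = x(t) + v(t) dt + f(t) dt^2 / (2m)
 *     v(t+dt) = v(t) + (f(t) + f(t+dt)) dt / (2m),
 *
 * with forces obtained in get_f() by central differences of the
 * Stillinger-Weber potential coded in f_ij() / h_ijk().
 */
#if 0
inline void example_run()
{
    auto p = model::make_default_parameters();
    model::particles sys;
    sys.init(p);                               // build the cluster of p.n layers
    for (int step = 0; step < 100; ++step)
    {
        sys.next(p);                           // one integration step
        double e = sys.penergy() + sys.kenergy();
        (void) e;                              // e.g. append to an energy plot
    }
}
#endif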
pdmemory.c
/*! \file Copyright (c) 2003, The Regents of the University of California, through Lawrence Berkeley National Laboratory (subject to receipt of any required approvals from U.S. Dept. of Energy) All rights reserved. The source code is distributed under BSD license, see the file License.txt at the top-level directory. */ /* * -- SuperLU MT routine (version 2.2) -- * Lawrence Berkeley National Lab, Univ. of California Berkeley, * and Xerox Palo Alto Research Center. * September 10, 2007 * * Last modified: * -- 8/29/2013: added lock to access Stack memory supplied by user * */ #include "slu_mt_ddefs.h" /* ------------------ Constants & Macros ------------------ */ #define EXPAND 1.5 #define NO_MEMTYPE 4 /* 0: lusup; 1: ucol; 2: lsub; 3: usub */ #define GluIntArray(n) (9 * (n) + 5) /* ------------------- Internal prototypes ------------------- */ void *pdgstrf_expand (int_t *, MemType,int_t, int_t, GlobalLU_t *); void copy_mem_double (int_t, void *, void *); void pdgstrf_StackCompress(GlobalLU_t *); void pdgstrf_SetupSpace (void *, int_t); void *duser_malloc (int_t, int_t); void duser_free (int_t, int_t); /* ---------------------------------------------- External prototypes (in memory.c - prec-indep) ---------------------------------------------- */ extern void copy_mem_int (int_t, void *, void *); extern void user_bcopy (char *, char *, int_t); typedef struct { int_t size; int_t used; int_t top1; /* grow upward, relative to &array[0] */ int_t top2; /* grow downward */ void *array; #if ( MACH==PTHREAD ) pthread_mutex_t lock;; #endif } LU_stack_t; typedef enum {HEAD, TAIL} stack_end_t; typedef enum {SYSTEM, USER} LU_space_t; ExpHeader *dexpanders = 0; /* Array of pointers to 4 types of memory */ static LU_stack_t stack; static int_t no_expand; static int_t ndim; static LU_space_t whichspace; /* 0 - system malloc'd; 1 - user provided */ /* Macros to manipulate stack */ #define StackFull(x) ( x + stack.used >= stack.size ) #define NotDoubleAlign(addr) ( (long long int)addr & 7 ) #define DoubleAlign(addr) ( ((long long int)addr + 7) & ~7L ) #define Reduce(alpha) ((alpha + 1) / 2) /* i.e. (alpha-1)/2 + 1 */ /* temporary space used by BLAS calls */ #define NUM_TEMPV(n,w,t,b) (SUPERLU_MAX( 2*n, (t + b)*w )) /* * Setup the memory model to be used for factorization. * lwork = 0: use system malloc; * lwork > 0: use user-supplied work[] space. */ void pdgstrf_SetupSpace(void *work, int_t lwork) { if ( lwork == 0 ) { whichspace = SYSTEM; /* malloc/free */ } else if ( lwork > 0 ) { whichspace = USER; /* user provided space */ stack.size = lwork; stack.used = 0; stack.top1 = 0; stack.top2 = lwork; stack.array = (void *) work; } #if ( MACH==PTHREAD ) pthread_mutex_init ( &stack.lock, NULL); #endif } /* * Destroy the lock used for user stack memory. */ void pdgstrf_StackFree() { #if ( MACH==PTHREAD ) /* Use pthread ... */ if ( whichspace == USER ) pthread_mutex_destroy( &stack.lock ); #endif } void *duser_malloc(int_t bytes, int_t which_end) { void *buf; #if ( MACH==PTHREAD ) /* Use pthread ... */ pthread_mutex_lock( &stack.lock ); #elif ( MACH==OPENMP ) /* Use openMP ... */ #pragma omp critical ( STACK_LOCK ) #endif { if ( StackFull(bytes) ) { buf = NULL; goto end; } if ( which_end == HEAD ) { buf = (char*) stack.array + stack.top1; stack.top1 += bytes; } else { stack.top2 -= bytes; buf = (char*) stack.array + stack.top2; } stack.used += bytes; end: ; } /* ---- end critical section ---- */ #if ( MACH==PTHREAD ) /* Use pthread ... 
*/ pthread_mutex_unlock( &stack.lock ); #endif return buf; } void duser_free(int_t bytes, int_t which_end) { #if ( MACH==PTHREAD ) /* Use pthread ... */ pthread_mutex_lock( &stack.lock ); #elif ( MACH==OPENMP ) /* Use openMP ... */ #pragma omp critical ( STACK_LOCK ) #endif { if ( which_end == HEAD ) stack.top1 -= bytes; else stack.top2 += bytes; stack.used -= bytes; } #if ( MACH==PTHREAD ) /* Use pthread ... */ pthread_mutex_unlock( &stack.lock ); #endif } /* Returns the working storage used during factorization */ int_t superlu_dTempSpace(int_t n, int_t w, int_t p) { register float tmp, ptmp; register int_t iword = sizeof(int_t), dword = sizeof(double); int_t maxsuper = sp_ienv(3), rowblk = sp_ienv(4); /* globally shared */ tmp = 14 * n * iword; /* local to each processor */ ptmp = (2 * w + 5 + NO_MARKER) * n * iword; ptmp += (n * w + NUM_TEMPV(n,w,maxsuper,rowblk)) * dword; #if ( PRNTlevel>=1 ) printf("Per-processor work[] %.0f MB\n", ptmp/1024/1024); #endif ptmp *= p; return (tmp + ptmp); } /* * superlu_memusage consists of the following fields: * o for_lu (float) * The amount of space used in bytes for L\U data structures. * o total_needed (float) * The amount of space needed in bytes to perform factorization. * o expansions (int) * The number of memory expansions during the LU factorization. */ int_t superlu_dQuerySpace(int_t P, SuperMatrix *L, SuperMatrix *U, int_t panel_size, superlu_memusage_t *superlu_memusage) { SCPformat *Lstore; NCPformat *Ustore; register int_t n, iword, dword, lwork; Lstore = L->Store; Ustore = U->Store; n = L->ncol; iword = sizeof(int_t); dword = sizeof(double); /* L supernodes of type SCP */ superlu_memusage->for_lu = (float) (7*n + 3) * iword + (float) Lstore->nzval_colend[n-1] * dword + (float) Lstore->rowind_colend[n-1] * iword; /* U columns of type NCP */ superlu_memusage->for_lu += (2*n + 1) * iword + (float) Ustore->colend[n-1] * (dword + iword); /* Working storage to support factorization */ lwork = superlu_dTempSpace(n, panel_size, P); superlu_memusage->total_needed = superlu_memusage->for_lu + lwork; superlu_memusage->expansions = --no_expand; return 0; } float pdgstrf_memory_use(const int_t nzlmax, const int_t nzumax, const int_t nzlumax) { register float iword, dword, t; iword = sizeof(int_t); dword = sizeof(double); t = 10. * ndim * iword + nzlmax * iword + nzumax * (iword + dword) + nzlumax * dword; return t; } /* * Allocate storage for the data structures common to all factor routines. * For those unpredictable size, make a guess as FILL * nnz(A). * Return value: * If lwork = -1, return the estimated amount of space required; * otherwise, return the amount of space actually allocated when * memory allocation failure occurred. 
*/ float pdgstrf_MemInit(int_t n, int_t annz, superlumt_options_t *superlumt_options, SuperMatrix *L, SuperMatrix *U, GlobalLU_t *Glu) { register int_t nprocs = superlumt_options->nprocs; yes_no_t refact = superlumt_options->refact; register int_t panel_size = superlumt_options->panel_size; register int_t lwork = superlumt_options->lwork; void *work = superlumt_options->work; int_t iword, dword, retries = 0; SCPformat *Lstore; NCPformat *Ustore; int_t *xsup, *xsup_end, *supno; int_t *lsub, *xlsub, *xlsub_end; double *lusup; int_t *xlusup, *xlusup_end; double *ucol; int_t *usub, *xusub, *xusub_end; int_t nzlmax, nzumax, nzlumax; int_t FILL_LUSUP = sp_ienv(6); /* Guess the fill-in growth for LUSUP */ int_t FILL_UCOL = sp_ienv(7); /* Guess the fill-in growth for UCOL */ int_t FILL_LSUB = sp_ienv(8); /* Guess the fill-in growth for LSUB */ no_expand = 0; ndim = n; iword = sizeof(int_t); dword = sizeof(double); if ( !dexpanders ) dexpanders = (ExpHeader *) SUPERLU_MALLOC(NO_MEMTYPE * sizeof(ExpHeader)); if ( refact == NO ) { /* Guess amount of storage needed by L\U factors. */ if ( FILL_UCOL < 0 ) nzumax = -FILL_UCOL * annz; else nzumax = FILL_UCOL; if ( FILL_LSUB < 0 ) nzlmax = -FILL_LSUB * annz; else nzlmax = FILL_LSUB; if ( Glu->dynamic_snode_bound == YES ) { if ( FILL_LUSUP < 0 ) nzlumax = -FILL_LUSUP * annz; else nzlumax = FILL_LUSUP; /* estimate an upper bound */ } else { nzlumax = Glu->nzlumax; /* preset as static upper bound */ } if ( lwork == -1 ) { return (GluIntArray(n) * iword + superlu_dTempSpace(n, panel_size, nprocs) + (nzlmax+nzumax)*iword + (nzlumax+nzumax)*dword); } else { pdgstrf_SetupSpace(work, lwork); } /* Integer pointers for L\U factors */ if ( whichspace == SYSTEM ) { xsup = intMalloc(n+1); xsup_end = intMalloc(n); supno = intMalloc(n+1); xlsub = intMalloc(n+1); xlsub_end = intMalloc(n); xlusup = intMalloc(n+1); xlusup_end = intMalloc(n); xusub = intMalloc(n+1); xusub_end = intMalloc(n); } else { xsup = (int_t *)duser_malloc((n+1) * iword, HEAD); xsup_end = (int_t *)duser_malloc((n) * iword, HEAD); supno = (int_t *)duser_malloc((n+1) * iword, HEAD); xlsub = (int_t *)duser_malloc((n+1) * iword, HEAD); xlsub_end = (int_t *)duser_malloc((n) * iword, HEAD); xlusup = (int_t *)duser_malloc((n+1) * iword, HEAD); xlusup_end = (int_t *)duser_malloc((n) * iword, HEAD); xusub = (int_t *)duser_malloc((n+1) * iword, HEAD); xusub_end = (int_t *)duser_malloc((n) * iword, HEAD); } lusup = (double *) pdgstrf_expand( &nzlumax, LUSUP, 0, 0, Glu ); ucol = (double *) pdgstrf_expand( &nzumax, UCOL, 0, 0, Glu ); lsub = (int_t *) pdgstrf_expand( &nzlmax, LSUB, 0, 0, Glu ); usub = (int_t *) pdgstrf_expand( &nzumax, USUB, 0, 1, Glu ); while ( !ucol || !lsub || !usub ) { /*SUPERLU_ABORT("Not enough core in LUMemInit()");*/ #if (PRNTlevel==1) printf(".. pdgstrf_MemInit(): #retries " IFMT "\n", ++retries); #endif if ( whichspace == SYSTEM ) { SUPERLU_FREE(ucol); SUPERLU_FREE(lsub); SUPERLU_FREE(usub); } else { duser_free(nzumax*dword+(nzlmax+nzumax)*iword, HEAD); } nzumax /= 2; /* reduce request */ nzlmax /= 2; if ( nzumax < annz/2 ) { printf("Not enough memory to perform factorization.\n"); return (pdgstrf_memory_use(nzlmax, nzumax, nzlumax) + n); } ucol = (double *) pdgstrf_expand( &nzumax, UCOL, 0, 0, Glu ); lsub = (int_t *) pdgstrf_expand( &nzlmax, LSUB, 0, 0, Glu ); usub = (int_t *) pdgstrf_expand( &nzumax, USUB, 0, 1, Glu ); } if ( !lusup ) { float t = pdgstrf_memory_use(nzlmax, nzumax, nzlumax) + n; printf("Not enough memory to perform factorization .. 
" "need %.1f GBytes\n", t*1e-9); fflush(stdout); return (t); } } else { /* refact == YES */ Lstore = L->Store; Ustore = U->Store; xsup = Lstore->sup_to_colbeg; xsup_end = Lstore->sup_to_colend; supno = Lstore->col_to_sup; xlsub = Lstore->rowind_colbeg; xlsub_end= Lstore->rowind_colend; xlusup = Lstore->nzval_colbeg; xlusup_end= Lstore->nzval_colend; xusub = Ustore->colbeg; xusub_end= Ustore->colend; nzlmax = Glu->nzlmax; /* max from previous factorization */ nzumax = Glu->nzumax; nzlumax = Glu->nzlumax; if ( lwork == -1 ) { return (GluIntArray(n) * iword + superlu_dTempSpace(n, panel_size, nprocs) + (nzlmax+nzumax)*iword + (nzlumax+nzumax)*dword); } else if ( lwork == 0 ) { whichspace = SYSTEM; } else { whichspace = USER; stack.size = lwork; stack.top2 = lwork; } lsub = dexpanders[LSUB].mem = Lstore->rowind; lusup = dexpanders[LUSUP].mem = Lstore->nzval; usub = dexpanders[USUB].mem = Ustore->rowind; ucol = dexpanders[UCOL].mem = Ustore->nzval;; dexpanders[LSUB].size = nzlmax; dexpanders[LUSUP].size = nzlumax; dexpanders[USUB].size = nzumax; dexpanders[UCOL].size = nzumax; } Glu->xsup = xsup; Glu->xsup_end = xsup_end; Glu->supno = supno; Glu->lsub = lsub; Glu->xlsub = xlsub; Glu->xlsub_end = xlsub_end; Glu->lusup = lusup; Glu->xlusup = xlusup; Glu->xlusup_end = xlusup_end; Glu->ucol = ucol; Glu->usub = usub; Glu->xusub = xusub; Glu->xusub_end = xusub_end; Glu->nzlmax = nzlmax; Glu->nzumax = nzumax; Glu->nzlumax = nzlumax; ++no_expand; #if ( PRNTlevel>=1 ) printf(".. pdgstrf_MemInit() refact %d, whichspace %d, nzlumax " IFMT ", nzumax " IFMT ", nzlmax " IFMT "\n", refact, whichspace, nzlumax, nzumax, nzlmax); printf(".. pdgstrf_MemInit() FILL_LUSUP " IFMT ", FILL_UCOL " IFMT ", FILL_LSUB " IFMT "\n", FILL_LUSUP, FILL_UCOL, FILL_LSUB); fflush(stdout); #endif return 0; } /* pdgstrf_MemInit */ /* * Allocate known working storage. Returns 0 if success, otherwise * returns the number of bytes allocated so far when failure occurred. */ int_t pdgstrf_WorkInit(int_t n, int_t panel_size, int_t **iworkptr, double **dworkptr) { int_t isize, dsize, extra; double *old_ptr; int_t maxsuper = sp_ienv(3), rowblk = sp_ienv(4); isize = (2*panel_size + 5 + NO_MARKER) * n * sizeof(int_t); dsize = (n * panel_size + NUM_TEMPV(n,panel_size,maxsuper,rowblk)) * sizeof(double); if ( whichspace == SYSTEM ) *iworkptr = (int_t *) intCalloc(isize/sizeof(int_t)); else *iworkptr = (int_t *) duser_malloc(isize, TAIL); if ( ! *iworkptr ) { fprintf(stderr, "pdgstrf_WorkInit: malloc fails for local iworkptr[]\n"); return (isize + n); } if ( whichspace == SYSTEM ) *dworkptr = (double *) SUPERLU_MALLOC((size_t) dsize); else { *dworkptr = (double *) duser_malloc(dsize, TAIL); if ( NotDoubleAlign(*dworkptr) ) { old_ptr = *dworkptr; *dworkptr = (double*) DoubleAlign(*dworkptr); *dworkptr = (double*) ((double*)*dworkptr - 1); extra = (char*)old_ptr - (char*)*dworkptr; #if ( DEBUGlevel>=1 ) printf("pdgstrf_WorkInit: not aligned, extra" IFMT "\n", extra); #endif #if ( MACH==PTHREAD ) /* Use pthread ... */ pthread_mutex_lock( &stack.lock ); #elif ( MACH==OPENMP ) /* Use openMP ... */ #pragma omp critical ( STACK_LOCK ) #endif { stack.top2 -= extra; stack.used += extra; } #if ( MACH==PTHREAD ) /* Use pthread ... */ pthread_mutex_unlock( &stack.lock ); #endif } } /* else */ if ( ! *dworkptr ) { printf("malloc fails for local dworkptr[] ... dsize " IFMT "\n", dsize); return (isize + dsize + n); } return 0; } /* * Set up pointers for real working arrays. 
*/ void pdgstrf_SetRWork(int_t n, int_t panel_size, double *dworkptr, double **dense, double **tempv) { double zero = 0.0; int_t maxsuper = sp_ienv(3); int_t rowblk = sp_ienv(4); *dense = dworkptr; *tempv = *dense + panel_size*n; dfill (*dense, n * panel_size, zero); dfill (*tempv, NUM_TEMPV(n,panel_size,maxsuper,rowblk), zero); } /* * Free the working storage used by factor routines. */ void pdgstrf_WorkFree(int_t *iwork, double *dwork, GlobalLU_t *Glu) { if ( whichspace == SYSTEM ) { SUPERLU_FREE (iwork); SUPERLU_FREE (dwork); } else { #if ( MACH==PTHREAD ) /* Use pthread ... */ pthread_mutex_lock( &stack.lock ); #elif ( MACH==OPENMP ) /* Use openMP ... */ #pragma omp critical ( STACK_LOCK ) #endif { stack.used -= (stack.size - stack.top2); stack.top2 = stack.size; /* pdgstrf_StackCompress(Glu); */ } #if ( MACH==PTHREAD ) /* Use pthread ... */ pthread_mutex_unlock( &stack.lock ); #endif } } /* * Expand the data structures for L and U during the factorization. * Return value: 0 - successful return * > 0 - number of bytes allocated when run out of space * * @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ * !! Warning: Not Implemented in SuperLU_MT !! * @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ */ int_t pdgstrf_MemXpand( int_t jcol, int_t next, /* number of elements currently in the factors */ MemType mem_type,/* which type of memory to expand */ int_t *maxlen, /* modified - max. length of a data structure */ GlobalLU_t *Glu /* modified - global LU data structures */ ) { void *new_mem; #ifdef CHK_EXPAND printf("pdgstrf_MemXpand(): jcol " IFMT ", next " IFMT ", maxlen " IFMT ", MemType " IFMT "\n", jcol, next, *maxlen, mem_type); #endif if (mem_type == USUB) new_mem = pdgstrf_expand(maxlen, mem_type, next, 1, Glu); else new_mem = pdgstrf_expand(maxlen, mem_type, next, 0, Glu); if ( !new_mem ) { int_t nzlmax = Glu->nzlmax; int_t nzumax = Glu->nzumax; int_t nzlumax = Glu->nzlumax; fprintf(stderr, "Can't expand MemType %d : jcol " IFMT "\n", mem_type, jcol); return (pdgstrf_memory_use(nzlmax, nzumax, nzlumax) + ndim); } switch ( mem_type ) { case LUSUP: Glu->lusup = (double *) new_mem; Glu->nzlumax = *maxlen; break; case UCOL: Glu->ucol = (double *) new_mem; Glu->nzumax = *maxlen; break; case LSUB: Glu->lsub = (int_t *) new_mem; Glu->nzlmax = *maxlen; break; case USUB: Glu->usub = (int_t *) new_mem; Glu->nzumax = *maxlen; break; } return 0; } void copy_mem_double(int_t howmany, void *old, void *new) { register int_t i; double *dold = old; double *dnew = new; for (i = 0; i < howmany; i++) dnew[i] = dold[i]; } /* * Expand the existing storage to accommodate more fill-ins. 
*/ void *pdgstrf_expand( int_t *prev_len, /* length used from previous call */ MemType type, /* which part of the memory to expand */ int_t len_to_copy, /* size of memory to be copied to new store */ int_t keep_prev, /* = 1: use prev_len; = 0: compute new_len to expand */ GlobalLU_t *Glu /* modified - global LU data structures */ ) { double alpha = EXPAND; void *new_mem, *old_mem; int_t new_len, tries, lword, extra, bytes_to_copy; void *ret = NULL; if ( no_expand == 0 || keep_prev ) /* First time allocate requested */ new_len = *prev_len; else { new_len = alpha * *prev_len; } if ( type == LSUB || type == USUB ) lword = sizeof(int_t); else lword = sizeof(double); if ( whichspace == SYSTEM ) { new_mem = (void *) SUPERLU_MALLOC( (size_t) new_len * lword ); if ( no_expand != 0 ) { tries = 0; if ( keep_prev ) { if ( !new_mem ) return (NULL); } else { while ( !new_mem ) { if ( ++tries > 10 ) return (NULL); alpha = Reduce(alpha); new_len = alpha * *prev_len; new_mem = (void *) SUPERLU_MALLOC((size_t) new_len * lword); } } if ( type == LSUB || type == USUB ) { copy_mem_int(len_to_copy, dexpanders[type].mem, new_mem); } else { copy_mem_double(len_to_copy, dexpanders[type].mem, new_mem); } SUPERLU_FREE (dexpanders[type].mem); } dexpanders[type].mem = (void *) new_mem; } else { /* whichspace == USER */ if ( no_expand == 0 ) { new_mem = duser_malloc(new_len * lword, HEAD); if ( NotDoubleAlign(new_mem) && (type == LUSUP || type == UCOL) ) { old_mem = new_mem; new_mem = (void *)DoubleAlign(new_mem); extra = (char*)new_mem - (char*)old_mem; #ifdef CHK_EXPAND printf("expand(): not aligned, extra " IFMT "\n", extra); #endif #if ( MACH==PTHREAD ) /* Use pthread ... */ pthread_mutex_lock( &stack.lock ); #elif ( MACH==OPENMP ) /* Use openMP ... */ #pragma omp critical ( STACK_LOCK ) #endif { stack.top1 += extra; stack.used += extra; } #if ( MACH==PTHREAD ) /* Use pthread ... */ pthread_mutex_unlock( &stack.lock ); #endif } dexpanders[type].mem = (void *) new_mem; } else { tries = 0; extra = (new_len - *prev_len) * lword; if ( keep_prev ) { if ( StackFull(extra) ) { new_len = 0; dexpanders[type].mem = NULL; return NULL; } } else { while ( StackFull(extra) ) { if ( ++tries > 10 ) { new_len = 0; dexpanders[type].mem = NULL; return NULL; } alpha = Reduce(alpha); new_len = alpha * *prev_len; extra = (new_len - *prev_len) * lword; } } if ( type != USUB ) { new_mem = (void*)((char*)dexpanders[type + 1].mem + extra); bytes_to_copy = (char*)stack.array + stack.top1 - (char*)dexpanders[type + 1].mem; user_bcopy(dexpanders[type+1].mem, new_mem, bytes_to_copy); if ( type < USUB ) { Glu->usub = dexpanders[USUB].mem = (void*)((char*)dexpanders[USUB].mem + extra); } if ( type < LSUB ) { Glu->lsub = dexpanders[LSUB].mem = (void*)((char*)dexpanders[LSUB].mem + extra); } if ( type < UCOL ) { Glu->ucol = dexpanders[UCOL].mem = (void*)((char*)dexpanders[UCOL].mem + extra); } stack.top1 += extra; stack.used += extra; if ( type == UCOL ) { stack.top1 += extra; /* Add same amount for USUB */ stack.used += extra; } } /* if ... */ } /* else ... */ } /* else, whichspace == USER */ #ifdef DEBUG printf("pdgstrf_expand[type " IFMT "]\n", type); #endif dexpanders[type].size = new_len; *prev_len = new_len; if ( no_expand ) ++no_expand; return (void *) dexpanders[type].mem; } /* expand */ /* * Compress the work[] array to remove fragmentation. 
*/ void pdgstrf_StackCompress(GlobalLU_t *Glu) { register int_t iword, dword; char *last, *fragment; int_t *ifrom, *ito; double *dfrom, *dto; int_t *xlsub, *lsub, *xusub_end, *usub, *xlusup; double *ucol, *lusup; iword = sizeof(int_t); dword = sizeof(double); xlsub = Glu->xlsub; lsub = Glu->lsub; xusub_end = Glu->xusub_end; usub = Glu->usub; xlusup = Glu->xlusup; ucol = Glu->ucol; lusup = Glu->lusup; dfrom = ucol; dto = (double *)((char*)lusup + xlusup[ndim] * dword); copy_mem_double(xusub_end[ndim-1], dfrom, dto); ucol = dto; ifrom = lsub; ito = (int_t *) ((char*)ucol + xusub_end[ndim-1] * iword); copy_mem_int(xlsub[ndim], ifrom, ito); lsub = ito; ifrom = usub; ito = (int_t *) ((char*)lsub + xlsub[ndim] * iword); copy_mem_int(xusub_end[ndim-1], ifrom, ito); usub = ito; last = (char*)usub + xusub_end[ndim-1] * iword; fragment = (char*) ((char*)stack.array + stack.top1 - last); stack.used -= (long long int) fragment; stack.top1 -= (long long int) fragment; Glu->ucol = ucol; Glu->lsub = lsub; Glu->usub = usub; #ifdef CHK_EXPAND printf("pdgstrf_StackCompress: fragment " IFMT "\n", fragment); /* PrintStack("After compress", Glu); for (last = 0; last < ndim; ++last) print_lu_col("After compress:", last, 0);*/ #endif } /* * Allocate storage for original matrix A */ void dallocateA(int_t n, int_t nnz, double **a, int_t **asub, int_t **xa) { *a = (double *) doubleMalloc(nnz); *asub = (int_t *) intMalloc(nnz); *xa = (int_t *) intMalloc(n+1); } double *doubleMalloc(int_t n) { double *buf; buf = (double *) SUPERLU_MALLOC( (size_t) n * sizeof(double) ); if ( !buf ) { fprintf(stderr, "SUPERLU_MALLOC failed for buf in doubleMalloc()"); exit (1); } return (buf); } double *doubleCalloc(int_t n) { double *buf; register int_t i; double zero = 0.0; buf = (double *) SUPERLU_MALLOC( (size_t) n * sizeof(double) ); if ( !buf ) { fprintf(stderr, "SUPERLU_MALLOC failed for buf in doubleCalloc()"); exit (1); } for (i = 0; i < n; ++i) buf[i] = zero; return (buf); } /* * Set up memory image in lusup[*], using the supernode boundaries in * the Householder matrix. * * In both static and dynamic scheme, the relaxed supernodes (leaves) * are stored in the beginning of lusup[*]. In the static scheme, the * memory is also set aside for the internal supernodes using upper * bound information from H. In the dynamic scheme, however, the memory * for the internal supernodes is not allocated by this routine. * * Return value * o Static scheme: number of nonzeros of all the supernodes in H. * o Dynamic scheme: number of nonzeros of the relaxed supernodes. */ int_t dPresetMap( const int_t n, SuperMatrix *A, /* original matrix permuted by columns */ pxgstrf_relax_t *pxgstrf_relax, /* relaxed supernodes */ superlumt_options_t *superlumt_options, /* input */ GlobalLU_t *Glu /* modified */ ) { register int_t i, j, k, w, rs, rs_lastcol, krow, kmark, maxsup, nextpos; register int_t rs_nrow; /* number of nonzero rows in a relaxed supernode */ int_t *marker, *asub, *xa_begin, *xa_end; NCPformat *Astore; int_t *map_in_sup; /* memory mapping function; values irrelevant on entry. */ int_t *colcnt; /* column count of Lc or H */ int_t *super_bnd; /* supernodes partition in H */ char *snode_env, *getenv(); snode_env = getenv("SuperLU_DYNAMIC_SNODE_STORE"); if ( snode_env != NULL ) { Glu->dynamic_snode_bound = YES; #if ( PRNTlevel>=1 ) printf(".. Use dynamic alg. 
to allocate storage for L supernodes.\n"); #endif } else Glu->dynamic_snode_bound = NO; Astore = A->Store; asub = Astore->rowind; xa_begin = Astore->colbeg; xa_end = Astore->colend; rs = 1; marker = intMalloc(n); ifill(marker, n, EMPTY); map_in_sup = Glu->map_in_sup = intCalloc(n+1); colcnt = superlumt_options->colcnt_h; super_bnd = superlumt_options->part_super_h; nextpos = 0; /* Split large supernode into smaller pieces */ maxsup = sp_ienv(3); for (j = 0; j < n; ) { w = super_bnd[j]; k = j + w; if ( w > maxsup ) { w = w % maxsup; if ( w == 0 ) w = maxsup; while ( j < k ) { super_bnd[j] = w; j += w; w = maxsup; } } j = k; } for (j = 0; j < n; j += w) { if ( Glu->dynamic_snode_bound == NO ) map_in_sup[j] = nextpos; if ( pxgstrf_relax[rs].fcol == j ) { /* Column j starts a relaxed supernode. */ map_in_sup[j] = nextpos; rs_nrow = 0; w = pxgstrf_relax[rs++].size; rs_lastcol = j + w; for (i = j; i < rs_lastcol; ++i) { /* for each nonzero in A[*,i] */ for (k = xa_begin[i]; k < xa_end[i]; k++) { krow = asub[k]; kmark = marker[krow]; if ( kmark != j ) { /* first time visit krow */ marker[krow] = j; ++rs_nrow; } } } nextpos += w * rs_nrow; /* Find the next H-supernode, with leading column i, which is outside the relaxed supernode, rs. */ for (i = j; i < rs_lastcol; k = i, i += super_bnd[i]); if ( i > rs_lastcol ) { /* The w columns [rs_lastcol, i) may join in the preceeding relaxed supernode; make sure we leave enough room for the combined supernode. */ w = i - rs_lastcol; nextpos += w * SUPERLU_MAX( rs_nrow, colcnt[k] ); } w = i - j; } else { /* Column j starts a supernode in H */ w = super_bnd[j]; if ( Glu->dynamic_snode_bound == NO ) nextpos += w * colcnt[j]; } /* Set up the offset (negative) to the leading column j of a supernode in H */ for (i = 1; i < w; ++i) map_in_sup[j + i] = -i; } /* for j ... */ if ( Glu->dynamic_snode_bound == YES ) Glu->nextlu = nextpos; else map_in_sup[n] = nextpos; #if ( PRNTlevel>=1 ) printf("** PresetMap() allocates " IFMT " reals to lusup[*]....\n", nextpos); #endif free (marker); return nextpos; }
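/*
 * Note (added for clarity): the memory model above is selected per call --
 * lwork == 0 uses system malloc/free, lwork > 0 carves all structures out of
 * the caller-supplied work[] stack (duser_malloc/duser_free), and lwork == -1
 * makes pdgstrf_MemInit() return a byte estimate without allocating.
 *
 * Usage sketch (illustrative only) for the caller-facing helper dallocateA()
 * defined above, which allocates the three arrays of a compressed-column
 * matrix; n, nnz and the subsequent matrix-create call are the caller's.
 */
#if 0
    double *a;     /* nonzero values                 */
    int_t  *asub;  /* row indices of the nonzeros    */
    int_t  *xa;    /* column pointers, length n + 1  */
    dallocateA(n, nnz, &a, &asub, &xa);
    /* ... fill a/asub/xa, then hand them to the SuperMatrix create routine ... */
#endif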
types-rng.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "config.h"
#include "io.h"
#include "jump.h"
#include "malloc.h"
#include "random.h"
#include "seed.h"
#include "version.h"

#include "SFMT.h"
#include "SFMT-jump.h"

const char * const TOOL = "types-rng";

_Noreturn static void usage(void)
{
    version();
    printf(
        "\n"
        "Generate the initial state for the random number generator.\n"
        "\n"
        "Usage: %s <RNG-STATE-FILE>\n"
        "\n", TOOL);
    exit(EXIT_SUCCESS);
}

int main(int argc, char **argv)
{
    if (argc == 1) {
        usage();
    }
    if (argc != 2) {
        myerror("wrong number of parameters");
    }
    const char *rng_state_file = argv[1];

    // Setup seed
    rng_state_t *rng_state;
    MYMALLOC(rng_state, rng_state_t, NGEN);
    uint32_t seed[SEED_LENGTH];
    memcpy(seed, SEED, sizeof(seed));

    // First generate 0, 100, 200, ... sequentially
    fprintf(stderr, "%s: ", TOOL);
    sfmt_init_by_array(&rng_state[0], seed, SEED_LENGTH);
    for (unsigned i = JUMP_FACTOR; i < NGEN; i += JUMP_FACTOR) {
        fprintf(stderr, ":");
        rng_state_t tmp = rng_state[i-JUMP_FACTOR];
        SFMT_jump(&tmp, JUMP_MANY);
        rng_state[i] = tmp;
    }

    // Then generate in parallel:
    // - 1, 2, 3, ..., 99
    // - 101, 102, 103, ..., 199
    #pragma omp parallel for
    for (unsigned i = 0; i < NGEN; i += JUMP_FACTOR) {
        for (unsigned j = i+1; j < i+JUMP_FACTOR && j < NGEN; j++) {
            fprintf(stderr, ".");
            rng_state_t tmp = rng_state[j-1];
            SFMT_jump(&tmp, JUMP_ONE);
            rng_state[j] = tmp;
        }
    }
    fprintf(stderr, "\n");

    myfile_t f = myopen(rng_state_file, true);
    rng_state_write(&f, rng_state);
    myclose(&f);
    free(rng_state);

    return EXIT_SUCCESS;
}
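/*
 * Note (added for clarity): the two loops above derive NGEN independent SFMT
 * streams from one seeded state by jumping ahead.  Stream 0 is seeded
 * directly; streams at multiples of JUMP_FACTOR are produced sequentially
 * with the long jump polynomial JUMP_MANY, each from the previous anchor;
 * the remaining streams inside each block are then filled in parallel, each
 * advanced by JUMP_ONE from its predecessor.  This layout assumes JUMP_MANY
 * corresponds to JUMP_FACTOR applications of JUMP_ONE, so that consecutive
 * streams never overlap.
 */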
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/fx-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. 
*/ #define LeftShiftOperator 0xf5U #define RightShiftOperator 0xf6U #define LessThanEqualOperator 0xf7U #define GreaterThanEqualOperator 0xf8U #define EqualOperator 0xf9U #define NotEqualOperator 0xfaU #define LogicalAndOperator 0xfbU #define LogicalOrOperator 0xfcU #define ExponentialNotation 0xfdU struct _FxInfo { const Image *images; char *expression; FILE *file; SplayTreeInfo *colors, *symbols; CacheView **view; RandomInfo *random_info; ExceptionInfo *exception; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireFxInfo() allocates the FxInfo structure. % % The format of the AcquireFxInfo method is: % % FxInfo *AcquireFxInfo(Image *images,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o expression: the expression. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression, ExceptionInfo *exception) { char fx_op[2]; const Image *next; FxInfo *fx_info; register ssize_t i; fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info)); (void) memset(fx_info,0,sizeof(*fx_info)); fx_info->exception=AcquireExceptionInfo(); fx_info->images=images; fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, RelinquishMagickMemory); fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, RelinquishMagickMemory); fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength( fx_info->images),sizeof(*fx_info->view)); if (fx_info->view == (CacheView **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); i=0; next=GetFirstImageInList(fx_info->images); for ( ; next != (Image *) NULL; next=next->next) { fx_info->view[i]=AcquireVirtualCacheView(next,exception); i++; } fx_info->random_info=AcquireRandomInfo(); fx_info->expression=ConstantString(expression); fx_info->file=stderr; (void) SubstituteString(&fx_info->expression," ",""); /* compact string */ /* Force right-to-left associativity for unary negation. */ (void) SubstituteString(&fx_info->expression,"-","-1.0*"); (void) SubstituteString(&fx_info->expression,"^-1.0*","^-"); (void) SubstituteString(&fx_info->expression,"E-1.0*","E-"); (void) SubstituteString(&fx_info->expression,"e-1.0*","e-"); /* Convert compound to simple operators. 
*/ fx_op[1]='\0'; *fx_op=(char) LeftShiftOperator; (void) SubstituteString(&fx_info->expression,"<<",fx_op); *fx_op=(char) RightShiftOperator; (void) SubstituteString(&fx_info->expression,">>",fx_op); *fx_op=(char) LessThanEqualOperator; (void) SubstituteString(&fx_info->expression,"<=",fx_op); *fx_op=(char) GreaterThanEqualOperator; (void) SubstituteString(&fx_info->expression,">=",fx_op); *fx_op=(char) EqualOperator; (void) SubstituteString(&fx_info->expression,"==",fx_op); *fx_op=(char) NotEqualOperator; (void) SubstituteString(&fx_info->expression,"!=",fx_op); *fx_op=(char) LogicalAndOperator; (void) SubstituteString(&fx_info->expression,"&&",fx_op); *fx_op=(char) LogicalOrOperator; (void) SubstituteString(&fx_info->expression,"||",fx_op); *fx_op=(char) ExponentialNotation; (void) SubstituteString(&fx_info->expression,"**",fx_op); return(fx_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d d N o i s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AddNoiseImage() adds random noise to the image. % % The format of the AddNoiseImage method is: % % Image *AddNoiseImage(const Image *image,const NoiseType noise_type, % const double attenuate,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o noise_type: The type of noise: Uniform, Gaussian, Multiplicative, % Impulse, Laplacian, or Poisson. % % o attenuate: attenuate the random distribution. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type, const double attenuate,ExceptionInfo *exception) { #define AddNoiseImageTag "AddNoise/Image" CacheView *image_view, *noise_view; Image *noise_image; MagickBooleanType status; MagickOffsetType progress; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Initialize noise image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) noise_image=AccelerateAddNoiseImage(image,noise_type,attenuate,exception); if (noise_image != (Image *) NULL) return(noise_image); #endif noise_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (noise_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse) { noise_image=DestroyImage(noise_image); return((Image *) NULL); } /* Add noise in each row. 
*/ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireVirtualCacheView(image,exception); noise_view=AcquireAuthenticCacheView(noise_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,noise_image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel); if ((traits == UndefinedPixelTrait) || (noise_traits == UndefinedPixelTrait)) continue; if (((noise_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) <= (QuantumRange/2))) { SetPixelChannel(noise_image,channel,p[i],q); continue; } SetPixelChannel(noise_image,channel,ClampToQuantum( GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)), q); } p+=GetPixelChannels(image); q+=GetPixelChannels(noise_image); } sync=SyncCacheViewAuthenticPixels(noise_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AddNoiseImage) #endif proceed=SetImageProgress(image,AddNoiseImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } noise_view=DestroyCacheView(noise_view); image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) noise_image=DestroyImage(noise_image); return(noise_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l u e S h i f t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlueShiftImage() mutes the colors of the image to simulate a scene at % nighttime in the moonlight. % % The format of the BlueShiftImage method is: % % Image *BlueShiftImage(const Image *image,const double factor, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o factor: the shift factor. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlueShiftImage(const Image *image,const double factor, ExceptionInfo *exception) { #define BlueShiftImageTag "BlueShift/Image" CacheView *image_view, *shift_view; Image *shift_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Allocate blue shift image. 
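  After allocation, every channel c of each pixel is remapped as

    c' = 0.5*(0.5*(c + factor*min(r,g,b)) + factor*max(r,g,b))
       = 0.25*c + 0.25*factor*min(r,g,b) + 0.5*factor*max(r,g,b)

  blending each channel toward the pixel's darkest and brightest channel
  values, which mutes the color as described above.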
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); shift_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (shift_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse) { shift_image=DestroyImage(shift_image); return((Image *) NULL); } /* Blue-shift DirectClass image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); shift_view=AcquireAuthenticCacheView(shift_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,shift_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; Quantum quantum; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { quantum=GetPixelRed(image,p); if (GetPixelGreen(image,p) < quantum) quantum=GetPixelGreen(image,p); if (GetPixelBlue(image,p) < quantum) quantum=GetPixelBlue(image,p); pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum); pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum); pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum); quantum=GetPixelRed(image,p); if (GetPixelGreen(image,p) > quantum) quantum=GetPixelGreen(image,p); if (GetPixelBlue(image,p) > quantum) quantum=GetPixelBlue(image,p); pixel.red=0.5*(pixel.red+factor*quantum); pixel.green=0.5*(pixel.green+factor*quantum); pixel.blue=0.5*(pixel.blue+factor*quantum); SetPixelRed(shift_image,ClampToQuantum(pixel.red),q); SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q); SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q); p+=GetPixelChannels(image); q+=GetPixelChannels(shift_image); } sync=SyncCacheViewAuthenticPixels(shift_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_BlueShiftImage) #endif proceed=SetImageProgress(image,BlueShiftImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); shift_view=DestroyCacheView(shift_view); if (status == MagickFalse) shift_image=DestroyImage(shift_image); return(shift_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a r c o a l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CharcoalImage() creates a new image that is a copy of an existing one with % the edge highlighted. It allocates the memory necessary for the new Image % structure and returns a pointer to the new image. 
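%
%  The effect is a composition of existing operators: a clone of the image is
%  edge-detected, blurred, and then normalized, negated, and converted to
%  grayscale.  A sketch of the internal call chain (error handling omitted):
%
%    edge_image=EdgeImage(clone_image,radius,exception);
%    charcoal_image=BlurImage(edge_image,radius,sigma,exception);
%    (void) NormalizeImage(charcoal_image,exception);
%    (void) NegateImage(charcoal_image,MagickFalse,exception);
%    (void) GrayscaleImage(charcoal_image,image->intensity,exception);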
% % The format of the CharcoalImage method is: % % Image *CharcoalImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CharcoalImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *charcoal_image, *clone_image, *edge_image; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); edge_image=EdgeImage(clone_image,radius,exception); clone_image=DestroyImage(clone_image); if (edge_image == (Image *) NULL) return((Image *) NULL); charcoal_image=BlurImage(edge_image,radius,sigma,exception); edge_image=DestroyImage(edge_image); if (charcoal_image == (Image *) NULL) return((Image *) NULL); (void) NormalizeImage(charcoal_image,exception); (void) NegateImage(charcoal_image,MagickFalse,exception); (void) GrayscaleImage(charcoal_image,image->intensity,exception); return(charcoal_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorizeImage() blends the fill color with each pixel in the image. % A percentage blend is specified with opacity. Control the application % of different color components by specifying a different percentage for % each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue). % % The format of the ColorizeImage method is: % % Image *ColorizeImage(const Image *image,const char *blend, % const PixelInfo *colorize,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o blend: A character string indicating the level of blending as a % percentage. % % o colorize: A color value. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ColorizeImage(const Image *image,const char *blend, const PixelInfo *colorize,ExceptionInfo *exception) { #define ColorizeImageTag "Colorize/Image" #define Colorize(pixel,blend_percentage,colorize) \ (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0) CacheView *image_view; GeometryInfo geometry_info; Image *colorize_image; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; PixelInfo blend_percentage; ssize_t y; /* Allocate colorized image. 
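  The blend string is parsed with ParseGeometry(): rho seeds every channel
  percentage, while sigma, xi, and psi override the green, blue, and alpha
  percentages (for CMYK, psi sets black and chi sets alpha).  Each channel
  q[i] is then mixed with the fill color through the Colorize() macro above,

    q[i] = (q[i]*(100 - blend) + colorize*blend)/100

  so the documented example "90/100/10" replaces 90% of the red, all of the
  green, and 10% of the blue with the fill color.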
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); colorize_image=CloneImage(image,0,0,MagickTrue,exception); if (colorize_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse) { colorize_image=DestroyImage(colorize_image); return((Image *) NULL); } if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) || (IsPixelInfoGray(colorize) != MagickFalse)) (void) SetImageColorspace(colorize_image,sRGBColorspace,exception); if ((colorize_image->alpha_trait == UndefinedPixelTrait) && (colorize->alpha_trait != UndefinedPixelTrait)) (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception); if (blend == (const char *) NULL) return(colorize_image); GetPixelInfo(colorize_image,&blend_percentage); flags=ParseGeometry(blend,&geometry_info); blend_percentage.red=geometry_info.rho; blend_percentage.green=geometry_info.rho; blend_percentage.blue=geometry_info.rho; blend_percentage.black=geometry_info.rho; blend_percentage.alpha=(MagickRealType) TransparentAlpha; if ((flags & SigmaValue) != 0) blend_percentage.green=geometry_info.sigma; if ((flags & XiValue) != 0) blend_percentage.blue=geometry_info.xi; if ((flags & PsiValue) != 0) blend_percentage.alpha=geometry_info.psi; if (blend_percentage.colorspace == CMYKColorspace) { if ((flags & PsiValue) != 0) blend_percentage.black=geometry_info.psi; if ((flags & ChiValue) != 0) blend_percentage.alpha=geometry_info.chi; } /* Colorize DirectClass image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(colorize_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(colorize_image,colorize_image,colorize_image->rows,1) #endif for (y=0; y < (ssize_t) colorize_image->rows; y++) { MagickBooleanType sync; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) colorize_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++) { PixelTrait traits = GetPixelChannelTraits(colorize_image, (PixelChannel) i); if (traits == UndefinedPixelTrait) continue; if (((traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(colorize_image,q) <= (QuantumRange/2))) continue; SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum( Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i), GetPixelInfoChannel(colorize,(PixelChannel) i))),q); } q+=GetPixelChannels(colorize_image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ColorizeImage) #endif proceed=SetImageProgress(image,ColorizeImageTag,progress++, colorize_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) colorize_image=DestroyImage(colorize_image); return(colorize_image); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r M a t r i x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorMatrixImage() applies color transformation to an image. This method % permits saturation changes, hue rotation, luminance to alpha, and various % other effects. Although variable-sized transformation matrices can be used, % typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA % (or RGBA with offsets). The matrix is similar to those used by Adobe Flash % except offsets are in column 6 rather than 5 (in support of CMYKA images) % and offsets are normalized (divide Flash offset by 255). % % The format of the ColorMatrixImage method is: % % Image *ColorMatrixImage(const Image *image, % const KernelInfo *color_matrix,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o color_matrix: the color matrix. % % o exception: return any errors or warnings in this structure. % */ /* FUTURE: modify to make use of a MagickMatrix Mutliply function That should be provided in "matrix.c" (ASIDE: actually distorts should do this too but currently doesn't) */ MagickExport Image *ColorMatrixImage(const Image *image, const KernelInfo *color_matrix,ExceptionInfo *exception) { #define ColorMatrixImageTag "ColorMatrix/Image" CacheView *color_view, *image_view; double ColorMatrix[6][6] = { { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 }, { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 }, { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 } }; Image *color_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t u, v, y; /* Map given color_matrix, into a 6x6 matrix RGBKA and a constant */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); i=0; for (v=0; v < (ssize_t) color_matrix->height; v++) for (u=0; u < (ssize_t) color_matrix->width; u++) { if ((v < 6) && (u < 6)) ColorMatrix[v][u]=color_matrix->values[i]; i++; } /* Initialize color image. */ color_image=CloneImage(image,0,0,MagickTrue,exception); if (color_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse) { color_image=DestroyImage(color_image); return((Image *) NULL); } if (image->debug != MagickFalse) { char format[MagickPathExtent], *message; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " ColorMatrix image with color matrix:"); message=AcquireString(""); for (v=0; v < 6; v++) { *message='\0'; (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < 6; u++) { (void) FormatLocaleString(format,MagickPathExtent,"%+f ", ColorMatrix[v][u]); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } /* Apply the ColorMatrix to image. 
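  For every pixel, output channel v (0=red, 1=green, 2=blue, 3=black,
  4=alpha) is the weighted sum over row v of the matrix:

    out[v] = M[v][0]*r + M[v][1]*g + M[v][2]*b
           + M[v][3]*k              (CMYK images only)
           + M[v][4]*a              (only when the image has an alpha channel)
           + M[v][5]*QuantumRange

  i.e. the Flash-style matrix described above, with the normalized offset in
  column 6 rescaled by QuantumRange.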
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); color_view=AcquireAuthenticCacheView(color_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,color_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t v; size_t height; GetPixelInfoPixel(image,p,&pixel); height=color_matrix->height > 6 ? 6UL : color_matrix->height; for (v=0; v < (ssize_t) height; v++) { double sum; sum=ColorMatrix[v][0]*GetPixelRed(image,p)+ColorMatrix[v][1]* GetPixelGreen(image,p)+ColorMatrix[v][2]*GetPixelBlue(image,p); if (image->colorspace == CMYKColorspace) sum+=ColorMatrix[v][3]*GetPixelBlack(image,p); if (image->alpha_trait != UndefinedPixelTrait) sum+=ColorMatrix[v][4]*GetPixelAlpha(image,p); sum+=QuantumRange*ColorMatrix[v][5]; switch (v) { case 0: pixel.red=sum; break; case 1: pixel.green=sum; break; case 2: pixel.blue=sum; break; case 3: pixel.black=sum; break; case 4: pixel.alpha=sum; break; default: break; } } SetPixelViaPixelInfo(color_image,&pixel,q); p+=GetPixelChannels(image); q+=GetPixelChannels(color_image); } if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ColorMatrixImage) #endif proceed=SetImageProgress(image,ColorMatrixImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } color_view=DestroyCacheView(color_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) color_image=DestroyImage(color_image); return(color_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyFxInfo() deallocates memory associated with an FxInfo structure. % % The format of the DestroyFxInfo method is: % % ImageInfo *DestroyFxInfo(ImageInfo *fx_info) % % A description of each parameter follows: % % o fx_info: the fx info. 
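%
%  DestroyFxInfo() pairs with AcquireFxInfo().  A typical lifecycle inside
%  the library (these are MagickPrivate interfaces; the image, coordinates,
%  and expression below are illustrative only) is:
%
%    FxInfo *fx_info = AcquireFxInfo(image,"0.5*u.r+0.5*v.r",exception);
%    double alpha;
%    (void) FxEvaluateChannelExpression(fx_info,RedPixelChannel,x,y,&alpha,
%      exception);
%    fx_info=DestroyFxInfo(fx_info);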
% */ MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info) { register ssize_t i; fx_info->exception=DestroyExceptionInfo(fx_info->exception); fx_info->expression=DestroyString(fx_info->expression); fx_info->symbols=DestroySplayTree(fx_info->symbols); fx_info->colors=DestroySplayTree(fx_info->colors); for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--) fx_info->view[i]=DestroyCacheView(fx_info->view[i]); fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view); fx_info->random_info=DestroyRandomInfo(fx_info->random_info); fx_info=(FxInfo *) RelinquishMagickMemory(fx_info); return(fx_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + F x E v a l u a t e C h a n n e l E x p r e s s i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxEvaluateChannelExpression() evaluates an expression and returns the % results. % % The format of the FxEvaluateExpression method is: % % double FxEvaluateChannelExpression(FxInfo *fx_info, % const PixelChannel channel,const ssize_t x,const ssize_t y, % double *alpha,Exceptioninfo *exception) % double FxEvaluateExpression(FxInfo *fx_info, % double *alpha,Exceptioninfo *exception) % % A description of each parameter follows: % % o fx_info: the fx info. % % o channel: the channel. % % o x,y: the pixel position. % % o alpha: the result. % % o exception: return any errors or warnings in this structure. % */ static double FxChannelStatistics(FxInfo *fx_info,Image *image, PixelChannel channel,const char *symbol,ExceptionInfo *exception) { ChannelType channel_mask; char key[MagickPathExtent], statistic[MagickPathExtent]; const char *value; register const char *p; channel_mask=UndefinedChannel; for (p=symbol; (*p != '.') && (*p != '\0'); p++) ; if (*p == '.') { ssize_t option; option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1); if (option >= 0) { channel=(PixelChannel) option; channel_mask=SetPixelChannelMask(image,(ChannelType) (1 << channel)); } } (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image, (double) channel,symbol); value=(const char *) GetValueFromSplayTree(fx_info->symbols,key); if (value != (const char *) NULL) { if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); return(QuantumScale*StringToDouble(value,(char **) NULL)); } (void) DeleteNodeFromSplayTree(fx_info->symbols,key); if (LocaleNCompare(symbol,"depth",5) == 0) { size_t depth; depth=GetImageDepth(image,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",(double) depth); } if (LocaleNCompare(symbol,"kurtosis",8) == 0) { double kurtosis, skewness; (void) GetImageKurtosis(image,&kurtosis,&skewness,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",kurtosis); } if (LocaleNCompare(symbol,"maxima",6) == 0) { double maxima, minima; (void) GetImageRange(image,&minima,&maxima,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",maxima); } if (LocaleNCompare(symbol,"mean",4) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",mean); } if (LocaleNCompare(symbol,"minima",6) == 0) { double maxima, minima; (void) GetImageRange(image,&minima,&maxima,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",minima); } if (LocaleNCompare(symbol,"skewness",8) == 0) { double kurtosis, skewness; (void) 
GetImageKurtosis(image,&kurtosis,&skewness,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",skewness); } if (LocaleNCompare(symbol,"standard_deviation",18) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g", standard_deviation); } if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key), ConstantString(statistic)); return(QuantumScale*StringToDouble(statistic,(char **) NULL)); } static double FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t, const ssize_t,const char *,const size_t,double *,ExceptionInfo *); static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta) { if (beta != 0) return(FxGCD(beta,alpha % beta)); return(alpha); } static inline const char *FxSubexpression(const char *expression, ExceptionInfo *exception) { const char *subexpression; register ssize_t level; level=0; subexpression=expression; while ((*subexpression != '\0') && ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL))) { if (strchr("(",(int) *subexpression) != (char *) NULL) level++; else if (strchr(")",(int) *subexpression) != (char *) NULL) level--; subexpression++; } if (*subexpression == '\0') (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnbalancedParenthesis","`%s'",expression); return(subexpression); } static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel, const ssize_t x,const ssize_t y,const char *expression, ExceptionInfo *exception) { char *q, subexpression[MagickPathExtent], symbol[MagickPathExtent]; const char *p, *value; Image *image; MagickBooleanType status; PixelInfo pixel; double alpha, beta; PointInfo point; register ssize_t i; size_t length, level; p=expression; i=GetImageIndexInList(fx_info->images); level=0; point.x=(double) x; point.y=(double) y; if (isalpha((int) ((unsigned char) *(p+1))) == 0) { if (strchr("suv",(int) *p) != (char *) NULL) { switch (*p) { case 's': default: { i=GetImageIndexInList(fx_info->images); break; } case 'u': i=0; break; case 'v': i=1; break; } p++; if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, 0,&beta,exception); i=(ssize_t) alpha; if (*p != '\0') p++; } if (*p == '.') p++; } if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0)) { p++; if (*p == '{') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '{') level++; else if (*p == '}') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, 0,&beta,exception); point.x=alpha; point.y=beta; if (*p != '\0') p++; } else if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, 0,&beta,exception); point.x+=alpha; point.y+=beta; if (*p != '\0') p++; } if (*p == '.') p++; } } length=GetImageListLength(fx_info->images); while (i < 0) i+=(ssize_t) length; if (length != 0) i%=length; image=GetImageFromList(fx_info->images,i); if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, 
"NoSuchImage","`%s'",expression); return(0.0); } GetPixelInfo(image,&pixel); status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate, point.x,point.y,&pixel,exception); (void) status; if ((strlen(p) > 2) && (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) && (LocaleCompare(p,"lightness") != 0)) { char name[MagickPathExtent]; (void) CopyMagickString(name,p,MagickPathExtent); for (q=name+(strlen(name)-1); q > name; q--) { if (*q == ')') break; if (*q == '.') { *q='\0'; break; } } if ((strlen(name) > 2) && (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL)) { PixelInfo *color; color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name); if (color != (PixelInfo *) NULL) { pixel=(*color); p+=strlen(name); } else { MagickBooleanType status; status=QueryColorCompliance(name,AllCompliance,&pixel, fx_info->exception); if (status != MagickFalse) { (void) AddValueToSplayTree(fx_info->colors,ConstantString( name),ClonePixelInfo(&pixel)); p+=strlen(name); } } } } (void) CopyMagickString(symbol,p,MagickPathExtent); StripString(symbol); if (*symbol == '\0') { switch (channel) { case RedPixelChannel: return(QuantumScale*pixel.red); case GreenPixelChannel: return(QuantumScale*pixel.green); case BluePixelChannel: return(QuantumScale*pixel.blue); case BlackPixelChannel: { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), ImageError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.black); } case AlphaPixelChannel: { if (pixel.alpha_trait == UndefinedPixelTrait) return(1.0); alpha=(double) (QuantumScale*pixel.alpha); return(alpha); } case IndexPixelChannel: return(0.0); case IntensityPixelChannel: { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } default: break; } (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",p); return(0.0); } switch (*symbol) { case 'A': case 'a': { if (LocaleCompare(symbol,"a") == 0) return((QuantumScale*pixel.alpha)); break; } case 'B': case 'b': { if (LocaleCompare(symbol,"b") == 0) return(QuantumScale*pixel.blue); break; } case 'C': case 'c': { if (LocaleNCompare(symbol,"channel",7) == 0) { GeometryInfo channel_info; MagickStatusType flags; flags=ParseGeometry(symbol+7,&channel_info); if (image->colorspace == CMYKColorspace) switch (channel) { case CyanPixelChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case MagentaPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case YellowPixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } case AlphaPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } default: return(0.0); } switch (channel) { case RedPixelChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case GreenPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case BluePixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } case AlphaPixelChannel: { if 
((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } default: return(0.0); } } if (LocaleCompare(symbol,"c") == 0) return(QuantumScale*pixel.red); break; } case 'D': case 'd': { if (LocaleNCompare(symbol,"depth",5) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'G': case 'g': { if (LocaleCompare(symbol,"g") == 0) return(QuantumScale*pixel.green); break; } case 'K': case 'k': { if (LocaleNCompare(symbol,"kurtosis",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"k") == 0) { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.black); } break; } case 'H': case 'h': { if (LocaleCompare(symbol,"h") == 0) return((double) image->rows); if (LocaleCompare(symbol,"hue") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(hue); } break; } case 'I': case 'i': { if ((LocaleCompare(symbol,"image.depth") == 0) || (LocaleCompare(symbol,"image.minima") == 0) || (LocaleCompare(symbol,"image.maxima") == 0) || (LocaleCompare(symbol,"image.mean") == 0) || (LocaleCompare(symbol,"image.kurtosis") == 0) || (LocaleCompare(symbol,"image.skewness") == 0) || (LocaleCompare(symbol,"image.standard_deviation") == 0)) return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception)); if (LocaleCompare(symbol,"image.resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"image.resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"intensity") == 0) { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } if (LocaleCompare(symbol,"i") == 0) return((double) x); break; } case 'J': case 'j': { if (LocaleCompare(symbol,"j") == 0) return((double) y); break; } case 'L': case 'l': { if (LocaleCompare(symbol,"lightness") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(lightness); } if (LocaleCompare(symbol,"luma") == 0) { double luma; luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luma); } if (LocaleCompare(symbol,"luminance") == 0) { double luminence; luminence=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luminence); } break; } case 'M': case 'm': { if (LocaleNCompare(symbol,"maxima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"mean",4) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"minima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"m") == 0) return(QuantumScale*pixel.green); break; } case 'N': case 'n': { if (LocaleCompare(symbol,"n") == 0) return((double) GetImageListLength(fx_info->images)); break; } case 'O': case 'o': { if (LocaleCompare(symbol,"o") == 0) return(QuantumScale*pixel.alpha); break; } case 'P': case 'p': { if (LocaleCompare(symbol,"page.height") == 0) return((double) image->page.height); if (LocaleCompare(symbol,"page.width") == 0) return((double) image->page.width); if (LocaleCompare(symbol,"page.x") == 0) return((double) image->page.x); if (LocaleCompare(symbol,"page.y") == 0) return((double) 
image->page.y); break; } case 'Q': case 'q': { if (LocaleCompare(symbol,"quality") == 0) return((double) image->quality); break; } case 'R': case 'r': { if (LocaleCompare(symbol,"resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"r") == 0) return(QuantumScale*pixel.red); break; } case 'S': case 's': { if (LocaleCompare(symbol,"saturation") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(saturation); } if (LocaleNCompare(symbol,"skewness",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"standard_deviation",18) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'T': case 't': { if (LocaleCompare(symbol,"t") == 0) return((double) GetImageIndexInList(fx_info->images)); break; } case 'W': case 'w': { if (LocaleCompare(symbol,"w") == 0) return((double) image->columns); break; } case 'Y': case 'y': { if (LocaleCompare(symbol,"y") == 0) return(QuantumScale*pixel.blue); break; } case 'Z': case 'z': { if (LocaleCompare(symbol,"z") == 0) return((double)GetImageDepth(image, fx_info->exception)); break; } default: break; } value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol); if (value != (const char *) NULL) return(StringToDouble(value,(char **) NULL)); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",symbol); return(0.0); } static const char *FxOperatorPrecedence(const char *expression, ExceptionInfo *exception) { typedef enum { UndefinedPrecedence, NullPrecedence, BitwiseComplementPrecedence, ExponentPrecedence, ExponentialNotationPrecedence, MultiplyPrecedence, AdditionPrecedence, ShiftPrecedence, RelationalPrecedence, EquivalencyPrecedence, BitwiseAndPrecedence, BitwiseOrPrecedence, LogicalAndPrecedence, LogicalOrPrecedence, TernaryPrecedence, AssignmentPrecedence, CommaPrecedence, SeparatorPrecedence } FxPrecedence; FxPrecedence precedence, target; register const char *subexpression; register int c; size_t level; c=(-1); level=0; subexpression=(const char *) NULL; target=NullPrecedence; while ((c != '\0') && (*expression != '\0')) { precedence=UndefinedPrecedence; if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@')) { expression++; continue; } switch (*expression) { case 'A': case 'a': { #if defined(MAGICKCORE_HAVE_ACOSH) if (LocaleNCompare(expression,"acosh",5) == 0) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (LocaleNCompare(expression,"asinh",5) == 0) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ATANH) if (LocaleNCompare(expression,"atanh",5) == 0) { expression+=5; break; } #endif if (LocaleNCompare(expression,"atan2",5) == 0) { expression+=5; break; } break; } case 'E': case 'e': { if ((isdigit(c) != 0) && ((LocaleNCompare(expression,"E+",2) == 0) || (LocaleNCompare(expression,"E-",2) == 0))) { expression+=2; /* scientific notation */ break; } } case 'J': case 'j': { if ((LocaleNCompare(expression,"j0",2) == 0) || (LocaleNCompare(expression,"j1",2) == 0)) { expression+=2; break; } break; } case '#': { while (isxdigit((int) ((unsigned char) *(expression+1))) != 0) expression++; break; } default: break; } if ((c == (int) '{') || (c == (int) '[')) level++; else if ((c == (int) '}') || (c == (int) ']')) level--; if (level == 0) switch ((unsigned char) *expression) { case '~': case '!': { 
precedence=BitwiseComplementPrecedence; break; } case '^': case '@': { precedence=ExponentPrecedence; break; } default: { if (((c != 0) && ((isdigit(c) != 0) || (strchr(")",c) != (char *) NULL))) && (((islower((int) ((unsigned char) *expression)) != 0) || (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) || ((isdigit(c) == 0) && (isdigit((int) ((unsigned char) *expression)) != 0))) && (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL)) precedence=MultiplyPrecedence; break; } case '*': case '/': case '%': { precedence=MultiplyPrecedence; break; } case '+': case '-': { if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) || (isalpha(c) != 0)) precedence=AdditionPrecedence; break; } case LeftShiftOperator: case RightShiftOperator: { precedence=ShiftPrecedence; break; } case '<': case LessThanEqualOperator: case GreaterThanEqualOperator: case '>': { precedence=RelationalPrecedence; break; } case EqualOperator: case NotEqualOperator: { precedence=EquivalencyPrecedence; break; } case '&': { precedence=BitwiseAndPrecedence; break; } case '|': { precedence=BitwiseOrPrecedence; break; } case LogicalAndOperator: { precedence=LogicalAndPrecedence; break; } case LogicalOrOperator: { precedence=LogicalOrPrecedence; break; } case ExponentialNotation: { precedence=ExponentialNotationPrecedence; break; } case ':': case '?': { precedence=TernaryPrecedence; break; } case '=': { precedence=AssignmentPrecedence; break; } case ',': { precedence=CommaPrecedence; break; } case ';': { precedence=SeparatorPrecedence; break; } } if ((precedence == BitwiseComplementPrecedence) || (precedence == TernaryPrecedence) || (precedence == AssignmentPrecedence)) { if (precedence > target) { /* Right-to-left associativity. */ target=precedence; subexpression=expression; } } else if (precedence >= target) { /* Left-to-right associativity. */ target=precedence; subexpression=expression; } if (strchr("(",(int) *expression) != (char *) NULL) expression=FxSubexpression(expression,exception); c=(int) (*expression++); } return(subexpression); } static double FxEvaluateSubexpression(FxInfo *fx_info, const PixelChannel channel,const ssize_t x,const ssize_t y, const char *expression,const size_t depth,double *beta, ExceptionInfo *exception) { #define FxMaxParenthesisDepth 58 #define FxMaxSubexpressionDepth 600 #define FxReturn(value) \ { \ subexpression=DestroyString(subexpression); \ return(value); \ } char *q, *subexpression; double alpha, gamma; register const char *p; *beta=0.0; subexpression=AcquireString(expression); *subexpression='\0'; if (depth > FxMaxSubexpressionDepth) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",expression); FxReturn(0.0); } if (exception->severity >= ErrorException) FxReturn(0.0); while (isspace((int) ((unsigned char) *expression)) != 0) expression++; if (*expression == '\0') FxReturn(0.0); p=FxOperatorPrecedence(expression,exception); if (p != (const char *) NULL) { (void) CopyMagickString(subexpression,expression,(size_t) (p-expression+1)); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); switch ((unsigned char) *p) { case '~': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) (~(size_t) *beta); FxReturn(*beta); } case '!': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta == 0.0 ? 
1.0 : 0.0); } case '^': { *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p, depth+1,beta,exception)); FxReturn(*beta); } case '*': case ExponentialNotation: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha*(*beta)); } case '/': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if (*beta == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"DivideByZero","`%s'",expression); FxReturn(0.0); } FxReturn(alpha/(*beta)); } case '%': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=fabs(floor((*beta)+0.5)); if (*beta == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"DivideByZero","`%s'",expression); FxReturn(0.0); } FxReturn(fmod(alpha,*beta)); } case '+': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha+(*beta)); } case '-': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha-(*beta)); } case LeftShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5)); FxReturn(*beta); } case RightShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); if ((size_t) (gamma+0.5) >= (8*sizeof(size_t))) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ShiftCountOverflow","`%s'",subexpression); FxReturn(0.0); } *beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5)); FxReturn(*beta); } case '<': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha < *beta ? 1.0 : 0.0); } case LessThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha >= *beta ? 1.0 : 0.0); } case EqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5)); FxReturn(*beta); } case '|': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5)); FxReturn(*beta); } case LogicalAndOperator: { p++; if (alpha <= 0.0) { *beta=0.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 1.0 : 0.0; FxReturn(*beta); } case LogicalOrOperator: { p++; if (alpha > 0.0) { *beta=1.0; FxReturn(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); *beta=(gamma > 0.0) ? 
1.0 : 0.0; FxReturn(*beta); } case '?': { (void) CopyMagickString(subexpression,++p,MagickPathExtent); q=subexpression; p=StringToken(":",&q); if (q == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } if (fabs(alpha) >= MagickEpsilon) gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta, exception); else gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,depth+1,beta, exception); FxReturn(gamma); } case '=': { char numeric[MagickPathExtent]; q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); FxReturn(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); (void) FormatLocaleString(numeric,MagickPathExtent,"%.20g",*beta); (void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression); (void) AddValueToSplayTree(fx_info->symbols,ConstantString( subexpression),ConstantString(numeric)); FxReturn(*beta); } case ',': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(alpha); } case ';': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta, exception); FxReturn(*beta); } default: { gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1, beta,exception); FxReturn(gamma); } } } if (strchr("(",(int) *expression) != (char *) NULL) { if (depth >= FxMaxParenthesisDepth) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "ParenthesisNestedTooDeeply","`%s'",expression); (void) CopyMagickString(subexpression,expression+1,MagickPathExtent); if (strlen(subexpression) != 0) subexpression[strlen(subexpression)-1]='\0'; gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1, beta,exception); FxReturn(gamma); } switch (*expression) { case '+': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(1.0*gamma); } case '-': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn(-1.0*gamma); } case '~': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1, beta,exception); FxReturn((double) (~(size_t) (gamma+0.5))); } case 'A': case 'a': { if (LocaleNCompare(expression,"abs",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(fabs(alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (LocaleNCompare(expression,"acosh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(acosh(alpha)); } #endif if (LocaleNCompare(expression,"acos",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(acos(alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"airy",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha); FxReturn(gamma*gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (LocaleNCompare(expression,"asinh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(asinh(alpha)); } #endif if (LocaleNCompare(expression,"asin",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); 
FxReturn(asin(alpha)); } if (LocaleNCompare(expression,"alt",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0); } if (LocaleNCompare(expression,"atan2",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atan2(alpha,*beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (LocaleNCompare(expression,"atanh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(atanh(alpha)); } #endif if (LocaleNCompare(expression,"atan",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(atan(alpha)); } if (LocaleCompare(expression,"a") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression,"b") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'C': case 'c': { if (LocaleNCompare(expression,"ceil",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(ceil(alpha)); } if (LocaleNCompare(expression,"clamp",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha < 0.0) FxReturn(0.0); if (alpha > 1.0) FxReturn(1.0); FxReturn(alpha); } if (LocaleNCompare(expression,"cosh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(cosh(alpha)); } if (LocaleNCompare(expression,"cos",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(cos(alpha)); } if (LocaleCompare(expression,"c") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'D': case 'd': { if (LocaleNCompare(expression,"debug",5) == 0) { const char *type; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (fx_info->images->colorspace == CMYKColorspace) switch (channel) { case CyanPixelChannel: type="cyan"; break; case MagentaPixelChannel: type="magenta"; break; case YellowPixelChannel: type="yellow"; break; case AlphaPixelChannel: type="opacity"; break; case BlackPixelChannel: type="black"; break; default: type="unknown"; break; } else switch (channel) { case RedPixelChannel: type="red"; break; case GreenPixelChannel: type="green"; break; case BluePixelChannel: type="blue"; break; case AlphaPixelChannel: type="opacity"; break; default: type="unknown"; break; } (void) CopyMagickString(subexpression,expression+6,MagickPathExtent); if (strlen(subexpression) > 1) subexpression[strlen(subexpression)-1]='\0'; if (fx_info->file != (FILE *) NULL) (void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: " "%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type, subexpression,GetMagickPrecision(),alpha); FxReturn(0.0); } if (LocaleNCompare(expression,"drc",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((alpha/(*beta*(alpha-1.0)+1.0))); } break; } case 'E': case 'e': { if (LocaleCompare(expression,"epsilon") == 0) FxReturn(MagickEpsilon); #if defined(MAGICKCORE_HAVE_ERF) if (LocaleNCompare(expression,"erf",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(erf(alpha)); } #endif if (LocaleNCompare(expression,"exp",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, 
depth+1,beta,exception); FxReturn(exp(alpha)); } if (LocaleCompare(expression,"e") == 0) FxReturn(2.7182818284590452354); break; } case 'F': case 'f': { if (LocaleNCompare(expression,"floor",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(floor(alpha)); } break; } case 'G': case 'g': { if (LocaleNCompare(expression,"gauss",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); gamma=exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI); FxReturn(gamma); } if (LocaleNCompare(expression,"gcd",3) == 0) { MagickOffsetType gcd; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+ 0.5)); FxReturn((double) gcd); } if (LocaleCompare(expression,"g") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'H': case 'h': { if (LocaleCompare(expression,"h") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); if (LocaleCompare(expression,"hue") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); if (LocaleNCompare(expression,"hypot",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(hypot(alpha,*beta)); } break; } case 'K': case 'k': { if (LocaleCompare(expression,"k") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'I': case 'i': { if (LocaleCompare(expression,"intensity") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); if (LocaleNCompare(expression,"int",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(floor(alpha)); } if (LocaleNCompare(expression,"isnan",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn((double) !!IsNaN(alpha)); } if (LocaleCompare(expression,"i") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'J': case 'j': { if (LocaleCompare(expression,"j") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); #if defined(MAGICKCORE_HAVE_J0) if (LocaleNCompare(expression,"j0",2) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j0(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"j1",2) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(j1(alpha)); } #endif #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"jinc",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0.0) FxReturn(1.0); gamma=(2.0*j1((MagickPI*alpha))/(MagickPI*alpha)); FxReturn(gamma); } #endif break; } case 'L': case 'l': { if (LocaleNCompare(expression,"ln",2) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2, depth+1,beta,exception); FxReturn(log(alpha)); } if (LocaleNCompare(expression,"logtwo",6) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn(log10(alpha)/log10(2.0)); } if (LocaleNCompare(expression,"log",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(log10(alpha)); } if (LocaleCompare(expression,"lightness") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'M': case 'm': { if 
(LocaleCompare(expression,"MaxRGB") == 0) FxReturn(QuantumRange); if (LocaleNCompare(expression,"maxima",6) == 0) break; if (LocaleNCompare(expression,"max",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha > *beta ? alpha : *beta); } if (LocaleNCompare(expression,"minima",6) == 0) break; if (LocaleNCompare(expression,"min",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(alpha < *beta ? alpha : *beta); } if (LocaleNCompare(expression,"mod",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); gamma=alpha-floor((alpha/(*beta)))*(*beta); FxReturn(gamma); } if (LocaleCompare(expression,"m") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'N': case 'n': { if (LocaleNCompare(expression,"not",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn((double) (alpha < MagickEpsilon)); } if (LocaleCompare(expression,"n") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression,"Opaque") == 0) FxReturn(1.0); if (LocaleCompare(expression,"o") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression,"phi") == 0) FxReturn(MagickPHI); if (LocaleCompare(expression,"pi") == 0) FxReturn(MagickPI); if (LocaleNCompare(expression,"pow",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(pow(alpha,*beta)); } if (LocaleCompare(expression,"p") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression,"QuantumRange") == 0) FxReturn(QuantumRange); if (LocaleCompare(expression,"QuantumScale") == 0) FxReturn(QuantumScale); break; } case 'R': case 'r': { if (LocaleNCompare(expression,"rand",4) == 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxEvaluateSubexpression) #endif alpha=GetPseudoRandomValue(fx_info->random_info); FxReturn(alpha); } if (LocaleNCompare(expression,"round",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); FxReturn(floor(alpha+0.5)); } if (LocaleCompare(expression,"r") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'S': case 's': { if (LocaleCompare(expression,"saturation") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); if (LocaleNCompare(expression,"sign",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(alpha < 0.0 ? 
-1.0 : 1.0); } if (LocaleNCompare(expression,"sinc",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); if (alpha == 0) FxReturn(1.0); gamma=sin((MagickPI*alpha))/(MagickPI*alpha); FxReturn(gamma); } if (LocaleNCompare(expression,"sinh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sinh(alpha)); } if (LocaleNCompare(expression,"sin",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(sin(alpha)); } if (LocaleNCompare(expression,"sqrt",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(sqrt(alpha)); } if (LocaleNCompare(expression,"squish",6) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6, depth+1,beta,exception); FxReturn((1.0/(1.0+exp(-alpha)))); } if (LocaleCompare(expression,"s") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'T': case 't': { if (LocaleNCompare(expression,"tanh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4, depth+1,beta,exception); FxReturn(tanh(alpha)); } if (LocaleNCompare(expression,"tan",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3, depth+1,beta,exception); FxReturn(tan(alpha)); } if (LocaleCompare(expression,"Transparent") == 0) FxReturn(0.0); if (LocaleNCompare(expression,"trunc",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); if (alpha >= 0.0) FxReturn(floor(alpha)); FxReturn(ceil(alpha)); } if (LocaleCompare(expression,"t") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'U': case 'u': { if (LocaleCompare(expression,"u") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'V': case 'v': { if (LocaleCompare(expression,"v") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'W': case 'w': { if (LocaleNCompare(expression,"while",5) == 0) { do { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth+1,beta,exception); } while (fabs(alpha) >= MagickEpsilon); FxReturn(*beta); } if (LocaleCompare(expression,"w") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'Y': case 'y': { if (LocaleCompare(expression,"y") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'Z': case 'z': { if (LocaleCompare(expression,"z") == 0) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } default: break; } subexpression=DestroyString(subexpression); q=(char *) expression; alpha=InterpretSiPrefixValue(expression,&q); if (q == expression) FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,exception)); FxReturn(alpha); } MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info, double *alpha,ExceptionInfo *exception) { MagickBooleanType status; status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha, exception); return(status); } MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info, double *alpha,ExceptionInfo *exception) { FILE *file; MagickBooleanType status; file=fx_info->file; fx_info->file=(FILE *) NULL; status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha, exception); fx_info->file=file; return(status); } MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info, const PixelChannel channel,const ssize_t x,const 
ssize_t y, double *alpha,ExceptionInfo *exception) { double beta; beta=0.0; *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0, &beta,exception); return(exception->severity == OptionError ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxImage() applies a mathematical expression to the specified image. % % The format of the FxImage method is: % % Image *FxImage(const Image *image,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o expression: A mathematical expression. % % o exception: return any errors or warnings in this structure. % */ static FxInfo **DestroyFxThreadSet(FxInfo **fx_info) { register ssize_t i; assert(fx_info != (FxInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (fx_info[i] != (FxInfo *) NULL) fx_info[i]=DestroyFxInfo(fx_info[i]); fx_info=(FxInfo **) RelinquishMagickMemory(fx_info); return(fx_info); } static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression, ExceptionInfo *exception) { char *fx_expression; FxInfo **fx_info; double alpha; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info)); if (fx_info == (FxInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return((FxInfo **) NULL); } (void) memset(fx_info,0,number_threads*sizeof(*fx_info)); if (*expression != '@') fx_expression=ConstantString(expression); else fx_expression=FileToString(expression+1,~0UL,exception); for (i=0; i < (ssize_t) number_threads; i++) { MagickBooleanType status; fx_info[i]=AcquireFxInfo(image,fx_expression,exception); if (fx_info[i] == (FxInfo *) NULL) break; status=FxPreprocessExpression(fx_info[i],&alpha,exception); if (status == MagickFalse) break; } fx_expression=DestroyString(fx_expression); if (i < (ssize_t) number_threads) fx_info=DestroyFxThreadSet(fx_info); return(fx_info); } MagickExport Image *FxImage(const Image *image,const char *expression, ExceptionInfo *exception) { #define FxImageTag "Fx/Image" CacheView *fx_view, *image_view; FxInfo **magick_restrict fx_info; Image *fx_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); fx_info=AcquireFxThreadSet(image,expression,exception); if (fx_info == (FxInfo **) NULL) return((Image *) NULL); fx_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (fx_image == (Image *) NULL) { fx_info=DestroyFxThreadSet(fx_info); return((Image *) NULL); } if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse) { fx_info=DestroyFxThreadSet(fx_info); fx_image=DestroyImage(fx_image); return((Image *) NULL); } /* Fx image. 
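    Evaluate the expression for every updatable channel of every pixel.  Each
    OpenMP thread evaluates with its own FxInfo taken from fx_info[id], so the
    per-expression parser state is never shared between threads.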
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); fx_view=AcquireAuthenticCacheView(fx_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,fx_image,fx_image->rows,1) #endif for (y=0; y < (ssize_t) fx_image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) fx_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel); if ((traits == UndefinedPixelTrait) || (fx_traits == UndefinedPixelTrait)) continue; if (((fx_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) <= (QuantumRange/2))) { SetPixelChannel(fx_image,channel,p[i],q); continue; } alpha=0.0; (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha, exception); q[i]=ClampToQuantum(QuantumRange*alpha); } p+=GetPixelChannels(image); q+=GetPixelChannels(fx_image); } if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxImage) #endif proceed=SetImageProgress(image,FxImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } fx_view=DestroyCacheView(fx_view); image_view=DestroyCacheView(image_view); fx_info=DestroyFxThreadSet(fx_info); if (status == MagickFalse) fx_image=DestroyImage(fx_image); return(fx_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I m p l o d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ImplodeImage() creates a new image that is a copy of an existing % one with the image pixels "implode" by the specified percentage. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ImplodeImage method is: % % Image *ImplodeImage(const Image *image,const double amount, % const PixelInterpolateMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o implode_image: Method ImplodeImage returns a pointer to the image % after it is implode. A null image is returned if there is a memory % shortage. % % o image: the image. % % o amount: Define the extent of the implosion. % % o method: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ImplodeImage(const Image *image,const double amount, const PixelInterpolateMethod method,ExceptionInfo *exception) { #define ImplodeImageTag "Implode/Image" CacheView *canvas_view, *implode_view, *interpolate_view; Image *canvas, *implode_image; MagickBooleanType status; MagickOffsetType progress; double radius; PointInfo center, scale; ssize_t y; /* Initialize implode image attributes. 
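    The effect resamples each pixel inside the largest centered ellipse from a
    point moved along its radius; pixels outside the ellipse are copied to the
    output unchanged.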
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); canvas=CloneImage(image,0,0,MagickTrue,exception); if (canvas == (Image *) NULL) return((Image *) NULL); if ((canvas->alpha_trait == UndefinedPixelTrait) && (canvas->background_color.alpha != OpaqueAlpha)) (void) SetImageAlphaChannel(canvas,OpaqueAlphaChannel,exception); implode_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue, exception); if (implode_image == (Image *) NULL) { canvas=DestroyImage(canvas); return((Image *) NULL); } if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse) { canvas=DestroyImage(canvas); implode_image=DestroyImage(implode_image); return((Image *) NULL); } /* Compute scaling factor. */ scale.x=1.0; scale.y=1.0; center.x=0.5*canvas->columns; center.y=0.5*canvas->rows; radius=center.x; if (canvas->columns > canvas->rows) scale.y=(double) canvas->columns/(double) canvas->rows; else if (canvas->columns < canvas->rows) { scale.x=(double) canvas->rows/(double) canvas->columns; radius=center.y; } /* Implode image. */ status=MagickTrue; progress=0; canvas_view=AcquireVirtualCacheView(canvas,exception); interpolate_view=AcquireVirtualCacheView(canvas,exception); implode_view=AcquireAuthenticCacheView(implode_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(canvas,implode_image,canvas->rows,1) #endif for (y=0; y < (ssize_t) canvas->rows; y++) { double distance; PointInfo delta; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas->columns,1,exception); q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } delta.y=scale.y*(double) (y-center.y); for (x=0; x < (ssize_t) canvas->columns; x++) { register ssize_t i; /* Determine if the pixel is within an ellipse. */ if (GetPixelWriteMask(canvas,p) <= (QuantumRange/2)) { SetPixelBackgoundColor(implode_image,q); p+=GetPixelChannels(canvas); q+=GetPixelChannels(implode_image); continue; } delta.x=scale.x*(double) (x-center.x); distance=delta.x*delta.x+delta.y*delta.y; if (distance >= (radius*radius)) for (i=0; i < (ssize_t) GetPixelChannels(canvas); i++) { PixelChannel channel = GetPixelChannelChannel(canvas,i); PixelTrait traits = GetPixelChannelTraits(canvas,channel); PixelTrait implode_traits = GetPixelChannelTraits(implode_image, channel); if ((traits == UndefinedPixelTrait) || (implode_traits == UndefinedPixelTrait)) continue; SetPixelChannel(implode_image,channel,p[i],q); } else { double factor; /* Implode the pixel. 
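    The source position is center+factor*delta, where factor depends on the
    normalized distance from the center and on amount: positive amounts draw
    the image content in toward the center, negative amounts push it outward.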
*/ factor=1.0; if (distance > 0.0) factor=pow(sin(MagickPI*sqrt((double) distance)/radius/2),-amount); status=InterpolatePixelChannels(canvas,interpolate_view,implode_image, method,(double) (factor*delta.x/scale.x+center.x),(double) (factor* delta.y/scale.y+center.y),q,exception); if (status == MagickFalse) break; } p+=GetPixelChannels(canvas); q+=GetPixelChannels(implode_image); } if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse) status=MagickFalse; if (canvas->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ImplodeImage) #endif proceed=SetImageProgress(canvas,ImplodeImageTag,progress++, canvas->rows); if (proceed == MagickFalse) status=MagickFalse; } } implode_view=DestroyCacheView(implode_view); interpolate_view=DestroyCacheView(interpolate_view); canvas_view=DestroyCacheView(canvas_view); canvas=DestroyImage(canvas); if (status == MagickFalse) implode_image=DestroyImage(implode_image); return(implode_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o r p h I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The MorphImages() method requires a minimum of two images. The first % image is transformed into the second by a number of intervening images % as specified by frames. % % The format of the MorphImage method is: % % Image *MorphImages(const Image *image,const size_t number_frames, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_frames: Define the number of in-between image to generate. % The more in-between frames, the smoother the morph. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MorphImages(const Image *image,const size_t number_frames, ExceptionInfo *exception) { #define MorphImageTag "Morph/Image" double alpha, beta; Image *morph_image, *morph_images; MagickBooleanType status; MagickOffsetType scene; register const Image *next; register ssize_t n; ssize_t y; /* Clone first frame in sequence. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); morph_images=CloneImage(image,0,0,MagickTrue,exception); if (morph_images == (Image *) NULL) return((Image *) NULL); if (GetNextImageInList(image) == (Image *) NULL) { /* Morph single image. */ for (n=1; n < (ssize_t) number_frames; n++) { morph_image=CloneImage(image,0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n, number_frames); if (proceed == MagickFalse) status=MagickFalse; } } return(GetFirstImageInList(morph_images)); } /* Morph image sequence. 
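    For each adjacent pair of frames, generate number_frames in-between images:
    both neighbors are resized to the interpolated geometry and blended channel
    by channel with weights alpha (current frame) and beta (next frame).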
*/ status=MagickTrue; scene=0; next=image; for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next)) { for (n=0; n < (ssize_t) number_frames; n++) { CacheView *image_view, *morph_view; beta=(double) (n+1.0)/(double) (number_frames+1.0); alpha=1.0-beta; morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta* GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta* GetNextImageInList(next)->rows+0.5),next->filter,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } status=SetImageStorageClass(morph_image,DirectClass,exception); if (status == MagickFalse) { morph_image=DestroyImage(morph_image); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns, morph_images->rows,GetNextImageInList(next)->filter,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } image_view=AcquireVirtualCacheView(morph_image,exception); morph_view=AcquireAuthenticCacheView(morph_images,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(morph_image,morph_image,morph_image->rows,1) #endif for (y=0; y < (ssize_t) morph_images->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1, exception); q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) morph_images->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++) { PixelChannel channel = GetPixelChannelChannel(morph_image,i); PixelTrait traits = GetPixelChannelTraits(morph_image,channel); PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel); if ((traits == UndefinedPixelTrait) || (morph_traits == UndefinedPixelTrait)) continue; if (((morph_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(morph_images,p) <= (QuantumRange/2))) { SetPixelChannel(morph_image,channel,p[i],q); continue; } SetPixelChannel(morph_image,channel,ClampToQuantum(alpha* GetPixelChannel(morph_images,channel,q)+beta*p[i]),q); } p+=GetPixelChannels(morph_image); q+=GetPixelChannels(morph_images); } sync=SyncCacheViewAuthenticPixels(morph_view,exception); if (sync == MagickFalse) status=MagickFalse; } morph_view=DestroyCacheView(morph_view); image_view=DestroyCacheView(image_view); morph_image=DestroyImage(morph_image); } if (n < (ssize_t) number_frames) break; /* Clone last frame in sequence. 
*/ morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MorphImages) #endif proceed=SetImageProgress(image,MorphImageTag,scene, GetImageListLength(image)); if (proceed == MagickFalse) status=MagickFalse; } scene++; } if (GetNextImageInList(next) != (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } return(GetFirstImageInList(morph_images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P l a s m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PlasmaImage() initializes an image with plasma fractal values. The image % must be initialized with a base color and the random number generator % seeded before this method is called. % % The format of the PlasmaImage method is: % % MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment, % size_t attenuate,size_t depth,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o segment: Define the region to apply plasma fractals values. % % o attenuate: Define the plasma attenuation factor. % % o depth: Limit the plasma recursion depth. % % o exception: return any errors or warnings in this structure. % */ static inline Quantum PlasmaPixel(RandomInfo *random_info, const double pixel,const double noise) { Quantum plasma; plasma=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)- noise/2.0); if (plasma <= 0) return((Quantum) 0); if (plasma >= QuantumRange) return(QuantumRange); return(plasma); } static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view, CacheView *u_view,CacheView *v_view,RandomInfo *random_info, const SegmentInfo *segment,size_t attenuate,size_t depth, ExceptionInfo *exception) { double plasma; register const Quantum *magick_restrict u, *magick_restrict v; register Quantum *magick_restrict q; register ssize_t i; ssize_t x, x_mid, y, y_mid; if ((fabs(segment->x2-segment->x1) <= MagickEpsilon) && (fabs(segment->y2-segment->y1) <= MagickEpsilon)) return(MagickTrue); if (depth != 0) { MagickBooleanType status; SegmentInfo local_info; /* Divide the area into quadrants and recurse. 
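    Split the segment at its midpoint and recurse into the four quadrants.
    attenuate is incremented on the way down, so the plasma noise added at the
    finer levels of the recursion is progressively smaller.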
*/ depth--; attenuate++; x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5); y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5); local_info=(*segment); local_info.x2=(double) x_mid; local_info.y2=(double) y_mid; (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth,exception); local_info=(*segment); local_info.y1=(double) y_mid; local_info.x2=(double) x_mid; (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth,exception); local_info=(*segment); local_info.x1=(double) x_mid; local_info.y2=(double) y_mid; (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth,exception); local_info=(*segment); local_info.x1=(double) x_mid; local_info.y1=(double) y_mid; status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth,exception); return(status); } x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5); y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5); if ((fabs(segment->x1-x_mid) < MagickEpsilon) && (fabs(segment->x2-x_mid) < MagickEpsilon) && (fabs(segment->y1-y_mid) < MagickEpsilon) && (fabs(segment->y2-y_mid) < MagickEpsilon)) return(MagickFalse); /* Average pixels and apply plasma. */ plasma=(double) QuantumRange/(2.0*attenuate); if ((fabs(segment->x1-x_mid) > MagickEpsilon) || (fabs(segment->x2-x_mid) > MagickEpsilon)) { /* Left pixel. */ x=(ssize_t) ceil(segment->x1-0.5); u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),1,1, exception); v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),1,1, exception); q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); if (fabs(segment->x1-segment->x2) > MagickEpsilon) { /* Right pixel. */ x=(ssize_t) ceil(segment->x2-0.5); u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5), 1,1,exception); v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5), 1,1,exception); q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); } } if ((fabs(segment->y1-y_mid) > MagickEpsilon) || (fabs(segment->y2-y_mid) > MagickEpsilon)) { if ((fabs(segment->x1-x_mid) > MagickEpsilon) || (fabs(segment->y2-y_mid) > MagickEpsilon)) { /* Bottom pixel. 
*/ y=(ssize_t) ceil(segment->y2-0.5); u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y, 1,1,exception); v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y, 1,1,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); } if (fabs(segment->y1-segment->y2) > MagickEpsilon) { /* Top pixel. */ y=(ssize_t) ceil(segment->y1-0.5); u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y, 1,1,exception); v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y, 1,1,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); } } if ((fabs(segment->x1-segment->x2) > MagickEpsilon) || (fabs(segment->y1-segment->y2) > MagickEpsilon)) { /* Middle pixel. */ x=(ssize_t) ceil(segment->x1-0.5); y=(ssize_t) ceil(segment->y1-0.5); u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception); x=(ssize_t) ceil(segment->x2-0.5); y=(ssize_t) ceil(segment->y2-0.5); v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception); q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); } if ((fabs(segment->x2-segment->x1) < 3.0) && (fabs(segment->y2-segment->y1) < 3.0)) return(MagickTrue); return(MagickFalse); } MagickExport MagickBooleanType PlasmaImage(Image *image, const SegmentInfo *segment,size_t attenuate,size_t depth, ExceptionInfo *exception) { CacheView *image_view, *u_view, *v_view; MagickBooleanType status; RandomInfo *random_info; if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); u_view=AcquireVirtualCacheView(image,exception); v_view=AcquireVirtualCacheView(image,exception); random_info=AcquireRandomInfo(); status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment, attenuate,depth,exception); random_info=DestroyRandomInfo(random_info); v_view=DestroyCacheView(v_view); u_view=DestroyCacheView(u_view); 
image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o l a r o i d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PolaroidImage() simulates a Polaroid picture. % % The format of the PolaroidImage method is: % % Image *PolaroidImage(const Image *image,const DrawInfo *draw_info, % const char *caption,const double angle, % const PixelInterpolateMethod method,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o caption: the Polaroid caption. % % o angle: Apply the effect along this angle. % % o method: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info, const char *caption,const double angle,const PixelInterpolateMethod method, ExceptionInfo *exception) { Image *bend_image, *caption_image, *flop_image, *picture_image, *polaroid_image, *rotate_image, *trim_image; size_t height; ssize_t quantum; /* Simulate a Polaroid picture. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double) image->rows)/25.0,10.0); height=image->rows+2*quantum; caption_image=(Image *) NULL; if (caption != (const char *) NULL) { char geometry[MagickPathExtent], *text; DrawInfo *annotate_info; ImageInfo *image_info; MagickBooleanType status; ssize_t count; TypeMetric metrics; /* Generate caption image. 
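    Render the caption with FormatMagickCaption() and AnnotateImage() into a
    separate image; its height is added to the Polaroid frame and the caption
    is composited beneath the picture later on.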
*/ caption_image=CloneImage(image,image->columns,1,MagickTrue,exception); if (caption_image == (Image *) NULL) return((Image *) NULL); image_info=AcquireImageInfo(); annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info); text=InterpretImageProperties(image_info,(Image *) image,caption, exception); image_info=DestroyImageInfo(image_info); (void) CloneString(&annotate_info->text,text); count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,&metrics, &text,exception); status=SetImageExtent(caption_image,image->columns,(size_t) ((count+1)* (metrics.ascent-metrics.descent)+0.5),exception); if (status == MagickFalse) caption_image=DestroyImage(caption_image); else { caption_image->background_color=image->border_color; (void) SetImageBackgroundColor(caption_image,exception); (void) CloneString(&annotate_info->text,text); (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%.20g", metrics.ascent); if (annotate_info->gravity == UndefinedGravity) (void) CloneString(&annotate_info->geometry,AcquireString( geometry)); (void) AnnotateImage(caption_image,annotate_info,exception); height+=caption_image->rows; } annotate_info=DestroyDrawInfo(annotate_info); text=DestroyString(text); } picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue, exception); if (picture_image == (Image *) NULL) { if (caption_image != (Image *) NULL) caption_image=DestroyImage(caption_image); return((Image *) NULL); } picture_image->background_color=image->border_color; (void) SetImageBackgroundColor(picture_image,exception); (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum, quantum,exception); if (caption_image != (Image *) NULL) { (void) CompositeImage(picture_image,caption_image,OverCompositeOp, MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception); caption_image=DestroyImage(caption_image); } (void) QueryColorCompliance("none",AllCompliance, &picture_image->background_color,exception); (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception); rotate_image=RotateImage(picture_image,90.0,exception); picture_image=DestroyImage(picture_image); if (rotate_image == (Image *) NULL) return((Image *) NULL); picture_image=rotate_image; bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0* picture_image->columns,method,exception); picture_image=DestroyImage(picture_image); if (bend_image == (Image *) NULL) return((Image *) NULL); picture_image=bend_image; rotate_image=RotateImage(picture_image,-90.0,exception); picture_image=DestroyImage(picture_image); if (rotate_image == (Image *) NULL) return((Image *) NULL); picture_image=rotate_image; picture_image->background_color=image->background_color; polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3, exception); if (polaroid_image == (Image *) NULL) { picture_image=DestroyImage(picture_image); return(picture_image); } flop_image=FlopImage(polaroid_image,exception); polaroid_image=DestroyImage(polaroid_image); if (flop_image == (Image *) NULL) { picture_image=DestroyImage(picture_image); return(picture_image); } polaroid_image=flop_image; (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp, MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception); picture_image=DestroyImage(picture_image); (void) QueryColorCompliance("none",AllCompliance, &polaroid_image->background_color,exception); rotate_image=RotateImage(polaroid_image,angle,exception); polaroid_image=DestroyImage(polaroid_image); if (rotate_image == (Image *) NULL) return((Image *) 
NULL); polaroid_image=rotate_image; trim_image=TrimImage(polaroid_image,exception); polaroid_image=DestroyImage(polaroid_image); if (trim_image == (Image *) NULL) return((Image *) NULL); polaroid_image=trim_image; return(polaroid_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p i a T o n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MagickSepiaToneImage() applies a special effect to the image, similar to the % effect achieved in a photo darkroom by sepia toning. Threshold ranges from % 0 to QuantumRange and is a measure of the extent of the sepia toning. A % threshold of 80% is a good starting point for a reasonable tone. % % The format of the SepiaToneImage method is: % % Image *SepiaToneImage(const Image *image,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: the tone threshold. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SepiaToneImage(const Image *image,const double threshold, ExceptionInfo *exception) { #define SepiaToneImageTag "SepiaTone/Image" CacheView *image_view, *sepia_view; Image *sepia_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Initialize sepia-toned image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); sepia_image=CloneImage(image,0,0,MagickTrue,exception); if (sepia_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse) { sepia_image=DestroyImage(sepia_image); return((Image *) NULL); } /* Tone each row of the image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); sepia_view=AcquireAuthenticCacheView(sepia_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,sepia_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double intensity, tone; intensity=GetPixelIntensity(image,p); tone=intensity > threshold ? (double) QuantumRange : intensity+ (double) QuantumRange-threshold; SetPixelRed(sepia_image,ClampToQuantum(tone),q); tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange : intensity+(double) QuantumRange-7.0*threshold/6.0; SetPixelGreen(sepia_image,ClampToQuantum(tone),q); tone=intensity < (threshold/6.0) ? 
0 : intensity-threshold/6.0; SetPixelBlue(sepia_image,ClampToQuantum(tone),q); tone=threshold/7.0; if ((double) GetPixelGreen(image,q) < tone) SetPixelGreen(sepia_image,ClampToQuantum(tone),q); if ((double) GetPixelBlue(image,q) < tone) SetPixelBlue(sepia_image,ClampToQuantum(tone),q); SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(sepia_image); } if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SepiaToneImage) #endif proceed=SetImageProgress(image,SepiaToneImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sepia_view=DestroyCacheView(sepia_view); image_view=DestroyCacheView(image_view); (void) NormalizeImage(sepia_image,exception); (void) ContrastImage(sepia_image,MagickTrue,exception); if (status == MagickFalse) sepia_image=DestroyImage(sepia_image); return(sepia_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a d o w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShadowImage() simulates a shadow from the specified image and returns it. % % The format of the ShadowImage method is: % % Image *ShadowImage(const Image *image,const double alpha, % const double sigma,const ssize_t x_offset,const ssize_t y_offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o alpha: percentage transparency. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o x_offset: the shadow x-offset. % % o y_offset: the shadow y-offset. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShadowImage(const Image *image,const double alpha, const double sigma,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { #define ShadowImageTag "Shadow/Image" CacheView *image_view; ChannelType channel_mask; Image *border_image, *clone_image, *shadow_image; MagickBooleanType status; PixelInfo background_color; RectangleInfo border_info; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(clone_image,sRGBColorspace,exception); (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod, exception); border_info.width=(size_t) floor(2.0*sigma+0.5); border_info.height=(size_t) floor(2.0*sigma+0.5); border_info.x=0; border_info.y=0; (void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color, exception); clone_image->alpha_trait=BlendPixelTrait; border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception); clone_image=DestroyImage(clone_image); if (border_image == (Image *) NULL) return((Image *) NULL); if (border_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception); /* Shadow image. 
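    Flatten the bordered clone to the background color, scaling each pixel's
    alpha by the requested percentage, then Gaussian-blur only the alpha
    channel to produce the soft shadow silhouette.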
*/ status=MagickTrue; background_color=border_image->background_color; background_color.alpha_trait=BlendPixelTrait; image_view=AcquireAuthenticCacheView(border_image,exception); for (y=0; y < (ssize_t) border_image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) border_image->columns; x++) { if (border_image->alpha_trait != UndefinedPixelTrait) background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0; SetPixelViaPixelInfo(border_image,&background_color,q); q+=GetPixelChannels(border_image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { border_image=DestroyImage(border_image); return((Image *) NULL); } channel_mask=SetImageChannelMask(border_image,AlphaChannel); shadow_image=BlurImage(border_image,0.0,sigma,exception); border_image=DestroyImage(border_image); if (shadow_image == (Image *) NULL) return((Image *) NULL); (void) SetPixelChannelMask(shadow_image,channel_mask); if (shadow_image->page.width == 0) shadow_image->page.width=shadow_image->columns; if (shadow_image->page.height == 0) shadow_image->page.height=shadow_image->rows; shadow_image->page.width+=x_offset-(ssize_t) border_info.width; shadow_image->page.height+=y_offset-(ssize_t) border_info.height; shadow_image->page.x+=x_offset-(ssize_t) border_info.width; shadow_image->page.y+=y_offset-(ssize_t) border_info.height; return(shadow_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S k e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SketchImage() simulates a pencil sketch. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). For % reasonable results, radius should be larger than sigma. Use a radius of 0 % and SketchImage() selects a suitable radius for you. Angle gives the angle % of the sketch. % % The format of the SketchImage method is: % % Image *SketchImage(const Image *image,const double radius, % const double sigma,const double angle,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the % center pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o angle: apply the effect along this angle. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SketchImage(const Image *image,const double radius, const double sigma,const double angle,ExceptionInfo *exception) { CacheView *random_view; Image *blend_image, *blur_image, *dodge_image, *random_image, *sketch_image; MagickBooleanType status; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Sketch image. 
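    Fill a double-sized image with per-pixel random noise, motion-blur it along
    the requested angle, take its edges as the stroke texture, and finally
    color-dodge and blend that texture with the original image.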
*/ random_image=CloneImage(image,image->columns << 1,image->rows << 1, MagickTrue,exception); if (random_image == (Image *) NULL) return((Image *) NULL); status=MagickTrue; random_info=AcquireRandomInfoThreadSet(); random_view=AcquireAuthenticCacheView(random_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) random_image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) random_image->columns; x++) { double value; register ssize_t i; if (GetPixelWriteMask(random_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(random_image); continue; } value=GetPseudoRandomValue(random_info[id]); for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=ClampToQuantum(QuantumRange*value); } q+=GetPixelChannels(random_image); } if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse) status=MagickFalse; } random_view=DestroyCacheView(random_view); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) { random_image=DestroyImage(random_image); return(random_image); } blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception); random_image=DestroyImage(random_image); if (blur_image == (Image *) NULL) return((Image *) NULL); dodge_image=EdgeImage(blur_image,radius,exception); blur_image=DestroyImage(blur_image); if (dodge_image == (Image *) NULL) return((Image *) NULL); (void) NormalizeImage(dodge_image,exception); (void) NegateImage(dodge_image,MagickFalse,exception); (void) TransformImage(&dodge_image,(char *) NULL,"50%",exception); sketch_image=CloneImage(image,0,0,MagickTrue,exception); if (sketch_image == (Image *) NULL) { dodge_image=DestroyImage(dodge_image); return((Image *) NULL); } (void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp, MagickTrue,0,0,exception); dodge_image=DestroyImage(dodge_image); blend_image=CloneImage(image,0,0,MagickTrue,exception); if (blend_image == (Image *) NULL) { sketch_image=DestroyImage(sketch_image); return((Image *) NULL); } if (blend_image->alpha_trait != BlendPixelTrait) (void) SetImageAlpha(blend_image,TransparentAlpha,exception); (void) SetImageArtifact(blend_image,"compose:args","20x80"); (void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue, 0,0,exception); blend_image=DestroyImage(blend_image); return(sketch_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S o l a r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SolarizeImage() applies a special effect to the image, similar to the effect % achieved in a photo darkroom by selectively exposing areas of photo % sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a % measure of the extent of the solarization. 
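%
% For example, a caller might solarize an image in place with a threshold of
% half the quantum range (a minimal sketch; error handling omitted):
%
%   (void) SolarizeImage(image,0.5*QuantumRange,exception);
%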
% % The format of the SolarizeImage method is: % % MagickBooleanType SolarizeImage(Image *image,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: Define the extent of the solarization. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SolarizeImage(Image *image, const double threshold,ExceptionInfo *exception) { #define SolarizeImageTag "Solarize/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); if (image->storage_class == PseudoClass) { register ssize_t i; /* Solarize colormap. */ for (i=0; i < (ssize_t) image->colors; i++) { if ((double) image->colormap[i].red > threshold) image->colormap[i].red=QuantumRange-image->colormap[i].red; if ((double) image->colormap[i].green > threshold) image->colormap[i].green=QuantumRange-image->colormap[i].green; if ((double) image->colormap[i].blue > threshold) image->colormap[i].blue=QuantumRange-image->colormap[i].blue; } } /* Solarize image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if ((double) q[i] > threshold) q[i]=QuantumRange-q[i]; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SolarizeImage) #endif proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t e g a n o I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SteganoImage() hides a digital watermark within the image. Recover % the hidden watermark later to prove that the authenticity of an image. % Offset defines the start position within the image to hide the watermark. % % The format of the SteganoImage method is: % % Image *SteganoImage(const Image *image,Image *watermark, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o watermark: the watermark image. 
% % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SteganoImage(const Image *image,const Image *watermark, ExceptionInfo *exception) { #define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0) #define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \ | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i))) #define SteganoImageTag "Stegano/Image" CacheView *stegano_view, *watermark_view; Image *stegano_image; int c; MagickBooleanType status; PixelInfo pixel; register Quantum *q; register ssize_t x; size_t depth, one; ssize_t i, j, k, y; /* Initialize steganographic image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(watermark != (const Image *) NULL); assert(watermark->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); one=1UL; stegano_image=CloneImage(image,0,0,MagickTrue,exception); if (stegano_image == (Image *) NULL) return((Image *) NULL); stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH; if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse) { stegano_image=DestroyImage(stegano_image); return((Image *) NULL); } /* Hide watermark in low-order bits of image. */ c=0; i=0; j=0; depth=stegano_image->depth; k=stegano_image->offset; status=MagickTrue; watermark_view=AcquireVirtualCacheView(watermark,exception); stegano_view=AcquireAuthenticCacheView(stegano_image,exception); for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--) { for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++) { for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++) { ssize_t offset; (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel, exception); offset=k/(ssize_t) stegano_image->columns; if (offset >= (ssize_t) stegano_image->rows) break; q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t) stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1, exception); if (q == (Quantum *) NULL) break; switch (c) { case 0: { SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } case 1: { SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } case 2: { SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j, GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q); break; } } if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse) break; c++; if (c == 3) c=0; k++; if (k == (ssize_t) (stegano_image->columns*stegano_image->columns)) k=0; if (k == stegano_image->offset) j++; } } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType) (depth-i),depth); if (proceed == MagickFalse) status=MagickFalse; } } stegano_view=DestroyCacheView(stegano_view); watermark_view=DestroyCacheView(watermark_view); if (status == MagickFalse) stegano_image=DestroyImage(stegano_image); return(stegano_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t e r e o A n a g l y p h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
StereoAnaglyphImage() combines two images and produces a single image that % is the composite of a left and right image of a stereo pair. Special % red-green stereo glasses are required to view this effect. % % The format of the StereoAnaglyphImage method is: % % Image *StereoImage(const Image *left_image,const Image *right_image, % ExceptionInfo *exception) % Image *StereoAnaglyphImage(const Image *left_image, % const Image *right_image,const ssize_t x_offset,const ssize_t y_offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o left_image: the left image. % % o right_image: the right image. % % o exception: return any errors or warnings in this structure. % % o x_offset: amount, in pixels, by which the left image is offset to the % right of the right image. % % o y_offset: amount, in pixels, by which the left image is offset to the % bottom of the right image. % % */ MagickExport Image *StereoImage(const Image *left_image, const Image *right_image,ExceptionInfo *exception) { return(StereoAnaglyphImage(left_image,right_image,0,0,exception)); } MagickExport Image *StereoAnaglyphImage(const Image *left_image, const Image *right_image,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { #define StereoImageTag "Stereo/Image" const Image *image; Image *stereo_image; MagickBooleanType status; ssize_t y; assert(left_image != (const Image *) NULL); assert(left_image->signature == MagickCoreSignature); if (left_image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", left_image->filename); assert(right_image != (const Image *) NULL); assert(right_image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(right_image != (const Image *) NULL); image=left_image; if ((left_image->columns != right_image->columns) || (left_image->rows != right_image->rows)) ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer"); /* Initialize stereo image attributes. */ stereo_image=CloneImage(left_image,left_image->columns,left_image->rows, MagickTrue,exception); if (stereo_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse) { stereo_image=DestroyImage(stereo_image); return((Image *) NULL); } (void) SetImageColorspace(stereo_image,sRGBColorspace,exception); /* Copy left image to red channel and right image to blue channel. 
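    The red channel comes from the left image and the green and blue channels
    from the right image; when the alpha channel is copied, it is set to the
    average of the two source alphas.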
*/ status=MagickTrue; for (y=0; y < (ssize_t) stereo_image->rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; register Quantum *magick_restrict r; p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1, exception); q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception); r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) || (r == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) stereo_image->columns; x++) { SetPixelRed(image,GetPixelRed(left_image,p),r); SetPixelGreen(image,GetPixelGreen(right_image,q),r); SetPixelBlue(image,GetPixelBlue(right_image,q),r); if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0) SetPixelAlpha(image,(GetPixelAlpha(left_image,p)+ GetPixelAlpha(right_image,q))/2,r); p+=GetPixelChannels(left_image); q+=GetPixelChannels(right_image); r+=GetPixelChannels(stereo_image); } if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse) break; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y, stereo_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } if (status == MagickFalse) stereo_image=DestroyImage(stereo_image); return(stereo_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S w i r l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SwirlImage() swirls the pixels about the center of the image, where % degrees indicates the sweep of the arc through which each pixel is moved. % You get a more dramatic effect as the degrees move from 1 to 360. % % The format of the SwirlImage method is: % % Image *SwirlImage(const Image *image,double degrees, % const PixelInterpolateMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o degrees: Define the tightness of the swirling effect. % % o method: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SwirlImage(const Image *image,double degrees, const PixelInterpolateMethod method,ExceptionInfo *exception) { #define SwirlImageTag "Swirl/Image" CacheView *canvas_view, *interpolate_view, *swirl_view; Image *canvas, *swirl_image; MagickBooleanType status; MagickOffsetType progress; double radius; PointInfo center, scale; ssize_t y; /* Initialize swirl image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); canvas=CloneImage(image,0,0,MagickTrue,exception); if (canvas == (Image *) NULL) return((Image *) NULL); if ((canvas->alpha_trait == UndefinedPixelTrait) && (canvas->background_color.alpha != OpaqueAlpha)) (void) SetImageAlphaChannel(canvas,OpaqueAlphaChannel,exception); swirl_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue, exception); if (swirl_image == (Image *) NULL) { canvas=DestroyImage(canvas); return((Image *) NULL); } if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse) { canvas=DestroyImage(canvas); swirl_image=DestroyImage(swirl_image); return((Image *) NULL); } /* Compute scaling factor. 
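    The swirl angle applied to a pixel is degrees*factor*factor, where factor
    falls from 1 at the center to 0 at the edge of the ellipse, so the rotation
    is strongest in the middle and fades toward the boundary.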
*/ center.x=(double) canvas->columns/2.0; center.y=(double) canvas->rows/2.0; radius=MagickMax(center.x,center.y); scale.x=1.0; scale.y=1.0; if (canvas->columns > canvas->rows) scale.y=(double) canvas->columns/(double) canvas->rows; else if (canvas->columns < canvas->rows) scale.x=(double) canvas->rows/(double) canvas->columns; degrees=(double) DegreesToRadians(degrees); /* Swirl image. */ status=MagickTrue; progress=0; canvas_view=AcquireVirtualCacheView(canvas,exception); interpolate_view=AcquireVirtualCacheView(image,exception); swirl_view=AcquireAuthenticCacheView(swirl_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(canvas,swirl_image,canvas->rows,1) #endif for (y=0; y < (ssize_t) canvas->rows; y++) { double distance; PointInfo delta; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas->columns,1,exception); q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } delta.y=scale.y*(double) (y-center.y); for (x=0; x < (ssize_t) canvas->columns; x++) { /* Determine if the pixel is within an ellipse. */ if (GetPixelWriteMask(canvas,p) <= (QuantumRange/2)) { SetPixelBackgoundColor(swirl_image,q); p+=GetPixelChannels(canvas); q+=GetPixelChannels(swirl_image); continue; } delta.x=scale.x*(double) (x-center.x); distance=delta.x*delta.x+delta.y*delta.y; if (distance >= (radius*radius)) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(canvas,i); PixelTrait traits = GetPixelChannelTraits(canvas,channel); PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image, channel); if ((traits == UndefinedPixelTrait) || (swirl_traits == UndefinedPixelTrait)) continue; SetPixelChannel(swirl_image,channel,p[i],q); } } else { double cosine, factor, sine; /* Swirl the pixel. */ factor=1.0-sqrt((double) distance)/radius; sine=sin((double) (degrees*factor*factor)); cosine=cos((double) (degrees*factor*factor)); status=InterpolatePixelChannels(canvas,interpolate_view,swirl_image, method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),(double) ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,exception); if (status == MagickFalse) break; } p+=GetPixelChannels(canvas); q+=GetPixelChannels(swirl_image); } if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse) status=MagickFalse; if (canvas->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SwirlImage) #endif proceed=SetImageProgress(canvas,SwirlImageTag,progress++,canvas->rows); if (proceed == MagickFalse) status=MagickFalse; } } swirl_view=DestroyCacheView(swirl_view); interpolate_view=DestroyCacheView(interpolate_view); canvas_view=DestroyCacheView(canvas_view); canvas=DestroyImage(canvas); if (status == MagickFalse) swirl_image=DestroyImage(swirl_image); return(swirl_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TintImage() applies a color vector to each pixel in the image. 
The length % of the vector is 0 for black and white and at its maximum for the midtones. % The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5)))) % % The format of the TintImage method is: % % Image *TintImage(const Image *image,const char *blend, % const PixelInfo *tint,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o blend: A color value used for tinting. % % o tint: A color value used for tinting. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *TintImage(const Image *image,const char *blend, const PixelInfo *tint,ExceptionInfo *exception) { #define TintImageTag "Tint/Image" CacheView *image_view, *tint_view; double intensity; GeometryInfo geometry_info; Image *tint_image; MagickBooleanType status; MagickOffsetType progress; PixelInfo color_vector; MagickStatusType flags; ssize_t y; /* Allocate tint image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); tint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (tint_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse) { tint_image=DestroyImage(tint_image); return((Image *) NULL); } if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsPixelInfoGray(tint) == MagickFalse)) (void) SetImageColorspace(tint_image,sRGBColorspace,exception); if (blend == (const char *) NULL) return(tint_image); /* Determine RGB values of the color. */ GetPixelInfo(image,&color_vector); flags=ParseGeometry(blend,&geometry_info); color_vector.red=geometry_info.rho; color_vector.green=geometry_info.rho; color_vector.blue=geometry_info.rho; color_vector.alpha=(MagickRealType) OpaqueAlpha; if ((flags & SigmaValue) != 0) color_vector.green=geometry_info.sigma; if ((flags & XiValue) != 0) color_vector.blue=geometry_info.xi; if ((flags & PsiValue) != 0) color_vector.alpha=geometry_info.psi; if (image->colorspace == CMYKColorspace) { color_vector.black=geometry_info.rho; if ((flags & PsiValue) != 0) color_vector.black=geometry_info.psi; if ((flags & ChiValue) != 0) color_vector.alpha=geometry_info.chi; } intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint); color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity); color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity); color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity); color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity); color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity); /* Tint image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); tint_view=AcquireAuthenticCacheView(tint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,tint_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { PixelInfo pixel; double weight; GetPixelInfo(image,&pixel); if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { SetPixelViaPixelInfo(tint_image,&pixel,q); p+=GetPixelChannels(image); q+=GetPixelChannels(tint_image); continue; } weight=QuantumScale*GetPixelRed(image,p)-0.5; pixel.red=(MagickRealType) GetPixelRed(image,p)+color_vector.red* (1.0-(4.0*(weight*weight))); weight=QuantumScale*GetPixelGreen(image,p)-0.5; pixel.green=(MagickRealType) GetPixelGreen(image,p)+color_vector.green* (1.0-(4.0*(weight*weight))); weight=QuantumScale*GetPixelBlue(image,p)-0.5; pixel.blue=(MagickRealType) GetPixelBlue(image,p)+color_vector.blue* (1.0-(4.0*(weight*weight))); weight=QuantumScale*GetPixelBlack(image,p)-0.5; pixel.black=(MagickRealType) GetPixelBlack(image,p)+color_vector.black* (1.0-(4.0*(weight*weight))); pixel.alpha=(MagickRealType) GetPixelAlpha(image,p); SetPixelViaPixelInfo(tint_image,&pixel,q); p+=GetPixelChannels(image); q+=GetPixelChannels(tint_image); } if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TintImage) #endif proceed=SetImageProgress(image,TintImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } tint_view=DestroyCacheView(tint_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) tint_image=DestroyImage(tint_image); return(tint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % V i g n e t t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % VignetteImage() softens the edges of the image in vignette style. % % The format of the VignetteImage method is: % % Image *VignetteImage(const Image *image,const double radius, % const double sigma,const ssize_t x,const ssize_t y, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o x, y: Define the x and y ellipse offset. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *VignetteImage(const Image *image,const double radius, const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception) { char ellipse[MagickPathExtent]; DrawInfo *draw_info; Image *canvas, *blur_image, *oval_image, *vignette_image; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); canvas=CloneImage(image,0,0,MagickTrue,exception); if (canvas == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(canvas,DirectClass,exception) == MagickFalse) { canvas=DestroyImage(canvas); return((Image *) NULL); } canvas->alpha_trait=BlendPixelTrait; oval_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue, exception); if (oval_image == (Image *) NULL) { canvas=DestroyImage(canvas); return((Image *) NULL); } (void) QueryColorCompliance("#000000",AllCompliance, &oval_image->background_color,exception); (void) SetImageBackgroundColor(oval_image,exception); draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL); (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill, exception); (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke, exception); (void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g," "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x, image->rows/2.0-y); draw_info->primitive=AcquireString(ellipse); (void) DrawImage(oval_image,draw_info,exception); draw_info=DestroyDrawInfo(draw_info); blur_image=BlurImage(oval_image,radius,sigma,exception); oval_image=DestroyImage(oval_image); if (blur_image == (Image *) NULL) { canvas=DestroyImage(canvas); return((Image *) NULL); } blur_image->alpha_trait=UndefinedPixelTrait; (void) CompositeImage(canvas,blur_image,IntensityCompositeOp,MagickTrue, 0,0,exception); blur_image=DestroyImage(blur_image); vignette_image=MergeImageLayers(canvas,FlattenLayer,exception); canvas=DestroyImage(canvas); if (vignette_image != (Image *) NULL) (void) TransformImageColorspace(vignette_image,image->colorspace,exception); return(vignette_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W a v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WaveImage() creates a "ripple" effect in the image by shifting the pixels % vertically along a sine wave whose amplitude and wavelength is specified % by the given parameters. % % The format of the WaveImage method is: % % Image *WaveImage(const Image *image,const double amplitude, % const double wave_length,const PixelInterpolateMethod method, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o amplitude, wave_length: Define the amplitude and wave length of the % sine wave. % % o interpolate: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *WaveImage(const Image *image,const double amplitude, const double wave_length,const PixelInterpolateMethod method, ExceptionInfo *exception) { #define WaveImageTag "Wave/Image" CacheView *canvas_view, *wave_view; Image *canvas, *wave_image; MagickBooleanType status; MagickOffsetType progress; double *sine_map; register ssize_t i; ssize_t y; /* Initialize wave image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); canvas=CloneImage(image,0,0,MagickTrue,exception); if (canvas == (Image *) NULL) return((Image *) NULL); if ((canvas->alpha_trait == UndefinedPixelTrait) && (canvas->background_color.alpha != OpaqueAlpha)) (void) SetImageAlpha(canvas,OpaqueAlpha,exception); wave_image=CloneImage(canvas,canvas->columns,(size_t) (canvas->rows+2.0* fabs(amplitude)),MagickTrue,exception); if (wave_image == (Image *) NULL) { canvas=DestroyImage(canvas); return((Image *) NULL); } if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse) { canvas=DestroyImage(canvas); wave_image=DestroyImage(wave_image); return((Image *) NULL); } /* Allocate sine map. */ sine_map=(double *) AcquireQuantumMemory((size_t) wave_image->columns, sizeof(*sine_map)); if (sine_map == (double *) NULL) { canvas=DestroyImage(canvas); wave_image=DestroyImage(wave_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t) wave_image->columns; i++) sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/ wave_length)); /* Wave image. */ status=MagickTrue; progress=0; canvas_view=AcquireVirtualCacheView(canvas,exception); wave_view=AcquireAuthenticCacheView(wave_image,exception); (void) SetCacheViewVirtualPixelMethod(canvas_view, BackgroundVirtualPixelMethod); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(canvas,wave_image,wave_image->rows,1) #endif for (y=0; y < (ssize_t) wave_image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) wave_image->columns; x++) { status=InterpolatePixelChannels(canvas,canvas_view,wave_image,method, (double) x,(double) (y-sine_map[x]),q,exception); if (status == MagickFalse) break; q+=GetPixelChannels(wave_image); } if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_WaveImage) #endif proceed=SetImageProgress(canvas,WaveImageTag,progress++,canvas->rows); if (proceed == MagickFalse) status=MagickFalse; } } wave_view=DestroyCacheView(wave_view); canvas_view=DestroyCacheView(canvas_view); canvas=DestroyImage(canvas); sine_map=(double *) RelinquishMagickMemory(sine_map); if (status == MagickFalse) wave_image=DestroyImage(wave_image); return(wave_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W a v e l e t D e n o i s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WaveletDenoiseImage() removes noise from the image using a wavelet % transform. The wavelet transform is a fast hierarchical scheme for % processing an image using a set of consecutive lowpass and high_pass filters, % followed by a decimation. This results in a decomposition into different % scales which can be regarded as different “frequency bands”, determined by % the mother wavelet. 
Adapted from dcraw.c by David Coffin. % % The format of the WaveletDenoiseImage method is: % % Image *WaveletDenoiseImage(const Image *image,const double threshold, % const double softness,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: set the threshold for smoothing. % % o softness: attenuate the smoothing threshold. % % o exception: return any errors or warnings in this structure. % */ static inline void HatTransform(const float *magick_restrict pixels, const size_t stride,const size_t extent,const size_t scale,float *kernel) { const float *magick_restrict p, *magick_restrict q, *magick_restrict r; register ssize_t i; p=pixels; q=pixels+scale*stride; r=pixels+scale*stride; for (i=0; i < (ssize_t) scale; i++) { kernel[i]=0.25f*(*p+(*p)+(*q)+(*r)); p+=stride; q-=stride; r+=stride; } for ( ; i < (ssize_t) (extent-scale); i++) { kernel[i]=0.25f*(2.0f*(*p)+*(p-scale*stride)+*(p+scale*stride)); p+=stride; } q=p-scale*stride; r=pixels+stride*(extent-2); for ( ; i < (ssize_t) extent; i++) { kernel[i]=0.25f*(*p+(*p)+(*q)+(*r)); p+=stride; q+=stride; r-=stride; } } MagickExport Image *WaveletDenoiseImage(const Image *image, const double threshold,const double softness,ExceptionInfo *exception) { CacheView *image_view, *noise_view; float *kernel, *pixels; Image *noise_image; MagickBooleanType status; MagickSizeType number_pixels; MemoryInfo *pixels_info; ssize_t channel; static const float noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f, 0.0080f, 0.0044f }; /* Initialize noise image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception); if (noise_image != (Image *) NULL) return(noise_image); #endif noise_image=CloneImage(image,0,0,MagickTrue,exception); if (noise_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse) { noise_image=DestroyImage(noise_image); return((Image *) NULL); } if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); pixels_info=AcquireVirtualMemory(3*image->columns,image->rows* sizeof(*pixels)); kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns), GetOpenMPMaximumThreads()*sizeof(*kernel)); if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL)) { if (kernel != (float *) NULL) kernel=(float *) RelinquishMagickMemory(kernel); if (pixels_info != (MemoryInfo *) NULL) pixels_info=RelinquishVirtualMemory(pixels_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=(float *) GetVirtualMemoryBlob(pixels_info); status=MagickTrue; number_pixels=(MagickSizeType) image->columns*image->rows; image_view=AcquireAuthenticCacheView(image,exception); noise_view=AcquireAuthenticCacheView(noise_image,exception); for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++) { register ssize_t i; size_t high_pass, low_pass; ssize_t level, y; PixelChannel pixel_channel; PixelTrait traits; if (status == MagickFalse) continue; traits=GetPixelChannelTraits(image,(PixelChannel) channel); if (traits == UndefinedPixelTrait) continue; 
pixel_channel=GetPixelChannelChannel(image,channel); if ((pixel_channel != RedPixelChannel) && (pixel_channel != GreenPixelChannel) && (pixel_channel != BluePixelChannel)) continue; /* Copy channel from image to wavelet pixel array. */ i=0; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { pixels[i++]=(float) p[channel]; p+=GetPixelChannels(image); } } /* Low pass filter outputs are called approximation kernel & high pass filters are referred to as detail kernel. The detail kernel have high values in the noisy parts of the signal. */ high_pass=0; for (level=0; level < 5; level++) { double magnitude; ssize_t x, y; low_pass=(size_t) (number_pixels*((level & 0x01)+1)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register float *magick_restrict p, *magick_restrict q; register ssize_t x; p=kernel+id*image->columns; q=pixels+y*image->columns; HatTransform(q+high_pass,1,image->columns,(size_t) (1 << level),p); q+=low_pass; for (x=0; x < (ssize_t) image->columns; x++) *q++=(*p++); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) \ magick_number_threads(image,image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); register float *magick_restrict p, *magick_restrict q; register ssize_t y; p=kernel+id*image->rows; q=pixels+x+low_pass; HatTransform(q,image->columns,image->rows,(size_t) (1 << level),p); for (y=0; y < (ssize_t) image->rows; y++) { *q=(*p++); q+=image->columns; } } /* To threshold, each coefficient is compared to a threshold value and attenuated / shrunk by some factor. */ magnitude=threshold*noise_levels[level]; for (i=0; i < (ssize_t) number_pixels; ++i) { pixels[high_pass+i]-=pixels[low_pass+i]; if (pixels[high_pass+i] < -magnitude) pixels[high_pass+i]+=magnitude-softness*magnitude; else if (pixels[high_pass+i] > magnitude) pixels[high_pass+i]-=magnitude-softness*magnitude; else pixels[high_pass+i]*=softness; if (high_pass != 0) pixels[i]+=pixels[high_pass+i]; } high_pass=low_pass; } /* Reconstruct image from the thresholded wavelet kernel. 
*/ i=0; for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register Quantum *magick_restrict q; register ssize_t x; ssize_t offset; q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; break; } offset=GetPixelChannelOffset(noise_image,pixel_channel); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType pixel; pixel=(MagickRealType) pixels[i]+pixels[low_pass+i]; q[offset]=ClampToQuantum(pixel); i++; q+=GetPixelChannels(noise_image); } sync=SyncCacheViewAuthenticPixels(noise_view,exception); if (sync == MagickFalse) status=MagickFalse; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType) channel,GetPixelChannels(image)); if (proceed == MagickFalse) status=MagickFalse; } } noise_view=DestroyCacheView(noise_view); image_view=DestroyCacheView(image_view); kernel=(float *) RelinquishMagickMemory(kernel); pixels_info=RelinquishVirtualMemory(pixels_info); if (status == MagickFalse) noise_image=DestroyImage(noise_image); return(noise_image); }
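The effect functions above all follow the same calling pattern: they take a source Image, effect parameters, and an ExceptionInfo, and return a newly allocated Image (or NULL on failure) that the caller must destroy. The following standalone sketch is illustrative only and is not part of MagickCore; it assumes the ImageMagick 7 MagickCore development headers and library, and the input/output file names are taken from the command line.

#include <stdio.h>
#include <MagickCore/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *image,
    *swirl,
    *wave;

  ImageInfo
    *image_info;

  if (argc != 3)
    {
      (void) fprintf(stderr,"Usage: %s input output\n",argv[0]);
      return(1);
    }
  MagickCoreGenesis(*argv,MagickTrue);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,argv[1],MagickPathExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      /*
        Swirl by 90 degrees, then ripple with amplitude 10 and wavelength 64.
      */
      swirl=SwirlImage(image,90.0,BilinearInterpolatePixel,exception);
      image=DestroyImage(image);
      if (swirl != (Image *) NULL)
        {
          wave=WaveImage(swirl,10.0,64.0,BilinearInterpolatePixel,exception);
          swirl=DestroyImage(swirl);
          if (wave != (Image *) NULL)
            {
              (void) WriteImages(image_info,wave,argv[2],exception);
              wave=DestroyImage(wave);
            }
        }
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}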
csrmv_merge.h
#ifndef __CSRMV_MERGE_H__ #define __CSRMV_MERGE_H__ #include <algorithm> #include "complex_ops.h" #include "openmp.h" #include "numpy/ndarraytypes.h" // See work by Merrill et al. (http://ieeexplore.ieee.org/abstract/document/7877136/) for original work and implementation. // This code contains modified versions of algorithms 2 and 3. template<class I> class CountingInputIterator{ const I init; public: CountingInputIterator(I _init) : init(_init) {} I operator[](I i){return init+i;} }; template<class I> struct CoordinateT{ I x,y; CoordinateT(I _x,I _y) : x(_x), y(_y) {} }; template<class I,class AIteratorT,class BIteratorT> CoordinateT<I> MergePathSearch(I diagonal, I a_len, I b_len, AIteratorT a, BIteratorT b) { // Diagonal search range (in x coordinate space) I zero = 0; I x_min = std::max(diagonal - b_len, zero); I x_max = std::min(diagonal, a_len); // 2D binary-search along the diagonal search range while (x_min < x_max) { I pivot = (x_min + x_max) >> 1; if (a[pivot] <= b[diagonal - pivot - 1]) { // Keep top-right half of diagonal range x_min = pivot + 1; } else { // Keep bottom-left half of diagonal range x_max = pivot; } } return CoordinateT<I>( std::min(x_min, a_len), // x coordinate in A diagonal - x_min); // y coordinate in B } template<class I,class T1,class T2,class T3> void csrmv_merge(const bool overwrite_y, const I num_rows, const I row_offsets[], const I column_indices[], const T1 values[], const T2 alpha, const T3 x[], I row_carry_out[], T3 value_carry_out[], T3 y[]) { const I* row_end_offsets = row_offsets + 1; // Merge list A: row end-offsets const I num_nonzeros = row_offsets[num_rows]; int num_threads = omp_get_num_threads(); CountingInputIterator<I> nz_indices(0); // Merge list B: Natural numbers(NZ indices) I num_merge_items = num_rows + num_nonzeros; // Merge path total length I items_per_thread = (num_merge_items + num_threads - 1) / num_threads; // Merge items per thread if(overwrite_y){ // Spawn parallel threads #pragma omp for schedule(static,1) for (int tid = 0; tid < num_threads; tid++) { // Find starting and ending MergePath coordinates (row-idx, nonzero-idx) for each thread I diagonal = std::min(items_per_thread * tid, num_merge_items); I diagonal_end = std::min(diagonal + items_per_thread, num_merge_items); CoordinateT<I> thread_coord = MergePathSearch(diagonal, num_rows, num_nonzeros, row_end_offsets, nz_indices); CoordinateT<I> thread_coord_end = MergePathSearch(diagonal_end, num_rows, num_nonzeros,row_end_offsets, nz_indices); // if(overwrite_y){ // std::fill(y+thread_coord.x,y+thread_coord_end.x,T3(0)); // } // Consume merge items, whole rows first T3 running_total = T3(0); for (; thread_coord.x < thread_coord_end.x; ++thread_coord.x) { I row_end_offset = row_end_offsets[thread_coord.x]; for (; thread_coord.y < row_end_offset; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y]]; y[thread_coord.x] = alpha * running_total; running_total = T3(0); } // Consume partial portion of thread's last row for (; thread_coord.y < thread_coord_end.y; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y]]; // Save carry-outs row_carry_out[tid] = thread_coord_end.x; value_carry_out[tid] = running_total; } } else{ // Spawn parallel threads #pragma omp for schedule(static,1) for (int tid = 0; tid < num_threads; tid++) { // Find starting and ending MergePath coordinates (row-idx, nonzero-idx) for each thread I diagonal = std::min(items_per_thread * tid, num_merge_items); I diagonal_end =
std::min(diagonal + items_per_thread, num_merge_items); CoordinateT<I> thread_coord = MergePathSearch(diagonal, num_rows, num_nonzeros, row_end_offsets, nz_indices); CoordinateT<I> thread_coord_end = MergePathSearch(diagonal_end, num_rows, num_nonzeros,row_end_offsets, nz_indices); // if(overwrite_y){ // std::fill(y+thread_coord.x,y+thread_coord_end.x,T3(0)); // } // Consume merge items, whole rows first T3 running_total = T3(0); for (; thread_coord.x < thread_coord_end.x; ++thread_coord.x) { I row_end_offset = row_end_offsets[thread_coord.x]; for (; thread_coord.y < row_end_offset; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y]]; y[thread_coord.x] += alpha * running_total; running_total = T3(0); } // Consume partial portion of thread's last row for (; thread_coord.y < thread_coord_end.y; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y]]; // Save carry-outs row_carry_out[tid] = thread_coord_end.x; value_carry_out[tid] = running_total; } } // Carry-out fix-up (rows spanning multiple threads) #pragma omp single { for (int tid = 0; tid < num_threads - 1; ++tid) if (row_carry_out[tid] < num_rows) y[row_carry_out[tid]] += alpha * value_carry_out[tid]; } } // same algorithm as for single vector but applied to multiple vectors. // the arrays are assumed to be on row-major order. cost O(n_vecs*nthreads) memory template<class I,class T1,class T2,class T3> void csrmv_merge_multi(const bool overwrite_y, const I num_rows, const npy_intp n_vecs, const I row_offsets[], const I column_indices[], const T1 values[], const T2 alpha, const T3 x[], I row_carry_out[], T3 value_carry_out[], T3 y[]) { const I* row_end_offsets = row_offsets + 1; // Merge list A: row end-offsets const I num_nonzeros = row_offsets[num_rows]; int num_threads = omp_get_num_threads(); CountingInputIterator<I> nz_indices(0); // Merge list B: Natural numbers(NZ indices) I num_merge_items = num_rows + num_nonzeros; // Merge path total length I items_per_thread = (num_merge_items + num_threads - 1) / num_threads; // Merge items per thread if(overwrite_y){ // Spawn parallel threads #pragma omp for schedule(static,1) for (int tid = 0; tid < num_threads; tid++) { // Find starting and ending MergePath coordinates (row-idx, nonzero-idx) for each thread I diagonal = std::min(items_per_thread * tid, num_merge_items); I diagonal_end = std::min(diagonal + items_per_thread, num_merge_items); CoordinateT<I> thread_coord = MergePathSearch(diagonal, num_rows, num_nonzeros, row_end_offsets, nz_indices); CoordinateT<I> thread_coord_end = MergePathSearch(diagonal_end, num_rows, num_nonzeros,row_end_offsets, nz_indices); // if(overwrite_y){ // std::fill(y+thread_coord.x,y+thread_coord_end.x,T3(0)); // } // Consume merge items, whole rows first T3 * running_total = &value_carry_out[n_vecs * tid]; std::fill(running_total,running_total+n_vecs,0); for (; thread_coord.x < thread_coord_end.x; ++thread_coord.x) { const I row_end_offset = row_end_offsets[thread_coord.x]; for (; thread_coord.y < row_end_offset; ++thread_coord.y){ const npy_intp x_row = column_indices[thread_coord.y] * n_vecs; const T1 val = values[thread_coord.y]; for(npy_intp vec_n=0;vec_n<n_vecs;vec_n++){ running_total[vec_n] += val * x[x_row+vec_n]; } } const npy_intp y_row = thread_coord.x * n_vecs; for(npy_intp vec_n=0;vec_n<n_vecs;vec_n++){ y[y_row + vec_n] = alpha * running_total[vec_n]; // overwrite y values } std::fill(running_total,running_total+n_vecs,0); } // Consume partial portion of thread's 
last row for (; thread_coord.y < thread_coord_end.y; ++thread_coord.y){ const npy_intp x_row = column_indices[thread_coord.y] * n_vecs; const T1 val = values[thread_coord.y]; for(npy_intp vec_n=0;vec_n<n_vecs;vec_n++){ running_total[vec_n] += val * x[x_row+vec_n]; } } // Save carry-outs row_carry_out[tid] = thread_coord_end.x; // value_carry_out[tid] = running_total; } } else{ // Spawn parallel threads #pragma omp for schedule(static,1) for (int tid = 0; tid < num_threads; tid++) { // Find starting and ending MergePath coordinates (row-idx, nonzero-idx) for each thread I diagonal = std::min(items_per_thread * tid, num_merge_items); I diagonal_end = std::min(diagonal + items_per_thread, num_merge_items); CoordinateT<I> thread_coord = MergePathSearch(diagonal, num_rows, num_nonzeros, row_end_offsets, nz_indices); CoordinateT<I> thread_coord_end = MergePathSearch(diagonal_end, num_rows, num_nonzeros,row_end_offsets, nz_indices); // if(overwrite_y){ // std::fill(y+thread_coord.x,y+thread_coord_end.x,T3(0)); // } // Consume merge items, whole rows first T3 * running_total = &value_carry_out[n_vecs * tid]; std::fill(running_total,running_total+n_vecs,0); for (; thread_coord.x < thread_coord_end.x; ++thread_coord.x) { const I row_end_offset = row_end_offsets[thread_coord.x]; for (; thread_coord.y < row_end_offset; ++thread_coord.y){ const npy_intp x_row = column_indices[thread_coord.y] * n_vecs; const T1 val = values[thread_coord.y]; for(npy_intp vec_n=0;vec_n<n_vecs;vec_n++){ running_total[vec_n] += val * x[x_row+vec_n]; } } const npy_intp y_row = thread_coord.x * n_vecs; for(npy_intp vec_n=0;vec_n<n_vecs;vec_n++){ y[y_row + vec_n] += alpha * running_total[vec_n]; // add to y values } std::fill(running_total,running_total+n_vecs,0); } // Consume partial portion of thread's last row for (; thread_coord.y < thread_coord_end.y; ++thread_coord.y){ const npy_intp x_row = column_indices[thread_coord.y] * n_vecs; const T1 val = values[thread_coord.y]; for(npy_intp vec_n=0;vec_n<n_vecs;vec_n++){ running_total[vec_n] += val * x[x_row+vec_n]; } } // Save carry-outs row_carry_out[tid] = thread_coord_end.x; // value_carry_out[tid] = running_total; } } // Carry-out fix-up (rows spanning multiple threads) #pragma omp single { for (int tid = 0; tid < num_threads - 1; ++tid){ if (row_carry_out[tid] < num_rows){ const npy_intp y_row = n_vecs * row_carry_out[tid]; T3 * running_total = &value_carry_out[n_vecs * tid]; for(npy_intp vec_n=0;vec_n < n_vecs;vec_n++){ y[y_row + vec_n] += alpha * running_total[vec_n]; } } } } } template<class I,class T1,class T2,class T3> void csrmv_merge_strided(const bool overwrite_y, const I num_rows, const I row_offsets[], const I column_indices[], const T1 values[], const T2 alpha, const npy_intp stride_x, const T3 x[], I row_carry_out[], T3 value_carry_out[], const npy_intp stride_y, T3 y[]) { const I* row_end_offsets = row_offsets + 1; // Merge list A: row end-offsets const I num_nonzeros = row_offsets[num_rows]; int num_threads = omp_get_num_threads(); CountingInputIterator<I> nz_indices(0); // Merge list B: Natural numbers(NZ indices) I num_merge_items = num_rows + num_nonzeros; // Merge path total length I items_per_thread = (num_merge_items + num_threads - 1) / num_threads; // Merge items per thread // if(overwrite_y){ // #pragma omp for schedule(static) // for(I i=0;i<num_rows;i++){ // y[i * stride_y] = 0; // } // } if(overwrite_y){ // Spawn parallel threads #pragma omp for schedule(static,1) for (int tid = 0; tid < num_threads; tid++) { // Find starting and ending MergePath 
coordinates (row-idx, nonzero-idx) for each thread I diagonal = std::min(items_per_thread * tid, num_merge_items); I diagonal_end = std::min(diagonal + items_per_thread, num_merge_items); CoordinateT<I> thread_coord = MergePathSearch(diagonal, num_rows, num_nonzeros, row_end_offsets, nz_indices); CoordinateT<I> thread_coord_end = MergePathSearch(diagonal_end, num_rows, num_nonzeros,row_end_offsets, nz_indices); // Consume merge items, whole rows first T3 running_total = 0.0; for (; thread_coord.x < thread_coord_end.x; ++thread_coord.x) { I row_end_offset = row_end_offsets[thread_coord.x]; for (; thread_coord.y < row_end_offset; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y] * stride_x]; y[thread_coord.x * stride_y] = alpha * running_total; // assign vs. add in-place running_total = 0.0; } // Consume partial portion of thread's last row for (; thread_coord.y < thread_coord_end.y; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y] * stride_x]; // Save carry-outs row_carry_out[tid] = thread_coord_end.x; value_carry_out[tid] = running_total; } } else{ // Spawn parallel threads #pragma omp for schedule(static,1) for (int tid = 0; tid < num_threads; tid++) { // Find starting and ending MergePath coordinates (row-idx, nonzero-idx) for each thread I diagonal = std::min(items_per_thread * tid, num_merge_items); I diagonal_end = std::min(diagonal + items_per_thread, num_merge_items); CoordinateT<I> thread_coord = MergePathSearch(diagonal, num_rows, num_nonzeros, row_end_offsets, nz_indices); CoordinateT<I> thread_coord_end = MergePathSearch(diagonal_end, num_rows, num_nonzeros,row_end_offsets, nz_indices); // Consume merge items, whole rows first T3 running_total = 0.0; for (; thread_coord.x < thread_coord_end.x; ++thread_coord.x) { I row_end_offset = row_end_offsets[thread_coord.x]; for (; thread_coord.y < row_end_offset; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y] * stride_x]; y[thread_coord.x * stride_y] += alpha * running_total; // add in-place vs. assign running_total = 0.0; } // Consume partial portion of thread's last row for (; thread_coord.y < thread_coord_end.y; ++thread_coord.y) running_total += values[thread_coord.y] * x[column_indices[thread_coord.y] * stride_x]; // Save carry-outs row_carry_out[tid] = thread_coord_end.x; value_carry_out[tid] = running_total; } } // Carry-out fix-up (rows spanning multiple threads) #pragma omp single { for (int tid = 0; tid < num_threads - 1; ++tid) if (row_carry_out[tid] < num_rows) y[row_carry_out[tid] * stride_y] += alpha * value_carry_out[tid]; } } #endif
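Because csrmv_merge() uses orphaned "#pragma omp for" and "#pragma omp single" worksharing constructs and queries omp_get_num_threads(), it appears to be intended for use inside an enclosing "#pragma omp parallel" region, with the carry-out arrays sized to at least the number of threads in that region. The sketch below is an assumption about that intended usage rather than part of the header itself; the 3x3 matrix and the type choices are made up for illustration, and it assumes OpenMP is enabled and the NumPy headers pulled in by numpy/ndarraytypes.h are on the include path.

#include <vector>
#include <cstdio>
#include <omp.h>
#include "csrmv_merge.h"

int main()
{
    // y = alpha*A*x for the 3x3 CSR matrix
    //     [ 1 0 2 ]
    // A = [ 0 3 0 ],  x = [1 1 1],  alpha = 2  ->  y = [6 6 18]
    //     [ 4 0 5 ]
    const int row_offsets[]    = {0, 2, 3, 5};
    const int column_indices[] = {0, 2, 1, 0, 2};
    const double values[]      = {1.0, 2.0, 3.0, 4.0, 5.0};
    const double x[]           = {1.0, 1.0, 1.0};
    double y[3]                = {0.0, 0.0, 0.0};

    // per-thread carry-out scratch, sized for the largest possible team
    const int max_threads = omp_get_max_threads();
    std::vector<int>    row_carry(max_threads);
    std::vector<double> value_carry(max_threads);

    #pragma omp parallel
    {
        // overwrite_y = true: y is assigned rather than accumulated into
        csrmv_merge(true, 3, row_offsets, column_indices, values,
                    2.0, x, row_carry.data(), value_carry.data(), y);
    }

    for (int i = 0; i < 3; i++)
        std::printf("y[%d] = %g\n", i, y[i]);
    return 0;
}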
GB_unaryop__abs_uint8_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_uint8_uint32 // op(A') function: GB_tran__abs_uint8_uint32 // C type: uint8_t // A type: uint32_t // cast: uint8_t cij = (uint8_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint32_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ uint8_t z = (uint8_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT8 || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_uint8_uint32 ( uint8_t *Cx, // Cx and Ax may be aliased uint32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_uint8_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
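For reference, a hand-written sketch (an illustration, not part of the auto-generated kernel above) of what GB_unop__abs_uint8_uint32 reduces to once the macros are expanded: ABS is the identity on an unsigned type, so the kernel is simply a typecasting copy of Ax into Cx.

#include <stdint.h>

// what GB_CAST_OP (p,p) expands to inside the parallel loop above
static void unop_abs_uint8_uint32_sketch (uint8_t *Cx, const uint32_t *Ax,
    int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;         // GB_GETA: aij = Ax [pA]
        uint8_t  z   = (uint8_t) aij ;  // GB_CASTING: typecast uint32_t to uint8_t
        Cx [p] = z ;                    // GB_OP: ABS of an unsigned value is itself
    }
}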
GB_binop__le_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__le_int64) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__le_int64) // A.*B function (eWiseMult): GB (_AemultB_03__le_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__le_int64) // A*D function (colscale): GB (_AxD__le_int64) // D*A function (rowscale): GB (_DxB__le_int64) // C+=B function (dense accum): GB (_Cdense_accumB__le_int64) // C+=b function (dense accum): GB (_Cdense_accumb__le_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_int64) // C=scalar+B GB (_bind1st__le_int64) // C=scalar+B' GB (_bind1st_tran__le_int64) // C=A+scalar GB (_bind2nd__le_int64) // C=A'+scalar GB (_bind2nd_tran__le_int64) // C type: bool // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LE || GxB_NO_INT64 || GxB_NO_LE_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__le_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__le_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__le_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__le_int64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__le_int64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__le_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif 
} //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__le_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__le_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__le_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__le_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__le_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; 
#else bool *Cx = (bool *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = Bx [p] ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__le_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = Ax [p] ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__le_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__le_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
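A user-level sketch of when the kernels in this file are reached (illustrative only, and assuming the standard GraphBLAS C API names rather than anything defined here): an eWiseMult with the built-in GrB_LE_INT64 operator yields a GrB_BOOL result with cij = (aij <= bij) on the intersection of the patterns of A and B.

#include <stdio.h>
#include <stdbool.h>
#include "GraphBLAS.h"

int main (void)
{
    GrB_Matrix A = NULL, B = NULL, C = NULL ;
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix_new (&A, GrB_INT64, 2, 2) ;
    GrB_Matrix_new (&B, GrB_INT64, 2, 2) ;
    GrB_Matrix_new (&C, GrB_BOOL , 2, 2) ;
    GrB_Matrix_setElement_INT64 (A, 3, 0, 0) ;      // A(0,0) = 3
    GrB_Matrix_setElement_INT64 (A, 7, 1, 1) ;      // A(1,1) = 7
    GrB_Matrix_setElement_INT64 (B, 5, 0, 0) ;      // B(0,0) = 5
    GrB_Matrix_setElement_INT64 (B, 2, 1, 1) ;      // B(1,1) = 2
    // C = (A .<= B) on the intersection pattern: C(0,0)=true, C(1,1)=false
    GrB_Matrix_eWiseMult_BinaryOp (C, NULL, NULL, GrB_LE_INT64, A, B, NULL) ;
    bool c00 = false, c11 = true ;
    GrB_Matrix_extractElement_BOOL (&c00, C, 0, 0) ;
    GrB_Matrix_extractElement_BOOL (&c11, C, 1, 1) ;
    printf ("C(0,0) = %d, C(1,1) = %d\n", (int) c00, (int) c11) ;  // prints 1, 0
    GrB_Matrix_free (&A) ;
    GrB_Matrix_free (&B) ;
    GrB_Matrix_free (&C) ;
    GrB_finalize ( ) ;
    return (0) ;
}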
main.c
#include "rsbench.h" int main(int argc, char * argv[]) { // ===================================================================== // Initialization & Command Line Read-In // ===================================================================== int version = 12; double start, stop; // set the env variables for thread affinity setenv("OMP_PLACES","cores",1); system("echo $OMP_PLACES"); setenv("KMP_AFFINITY","compact",1); system("echo $KMP_AFFINITY"); // Process CLI Fields Input input = read_CLI( argc, argv ); // Set number of OpenMP Threads omp_set_num_threads(input.nthreads); // ===================================================================== // Print-out of Input Summary // ===================================================================== logo(version); center_print("INPUT SUMMARY", 79); border_print(); print_input_summary(input); // ===================================================================== // Initialize Simulation Data Structures // ===================================================================== border_print(); center_print("INITIALIZATION", 79); border_print(); start = get_time(); SimulationData SD = initialize_simulation( input ); stop = get_time(); printf("Initialization Complete. (%.2lf seconds)\n", stop-start); // ===================================================================== // Cross Section (XS) Parallel Lookup Simulation Begins // ===================================================================== border_print(); center_print("SIMULATION", 79); border_print(); unsigned long vhash = 0; // Run Simulation start = get_time(); if( input.simulation_method == EVENT_BASED ) { if( input.kernel_id == 0 ) run_event_based_simulation(input, SD, &vhash ); else if( input.kernel_id == 1 ) run_event_based_simulation_optimization_1(input, SD, &vhash ); else { printf("Error: No kernel ID %d found!\n", input.kernel_id); exit(1); } } else if( input.simulation_method == HISTORY_BASED ) run_history_based_simulation(input, SD, &vhash ); stop = get_time(); // Final hash step vhash = vhash % 999983; printf("Simulation Complete.\n"); // ===================================================================== // Print / Save Results and Exit // ===================================================================== border_print(); center_print("RESULTS", 79); border_print(); int is_invalid = validate_and_print_results(input, stop-start, vhash); border_print(); return is_invalid; } // init.c SimulationData initialize_simulation( Input input ) { uint64_t seed = INITIALIZATION_SEED; // Get material data printf("Loading Hoogenboom-Martin material data...\n"); SimulationData SD = get_materials( input, &seed ); // Allocate & fill energy grids printf("Generating resonance distributions...\n"); SD.n_poles = generate_n_poles( input, &seed ); SD.length_n_poles = input.n_nuclides; // Allocate & fill Window grids printf("Generating window distributions...\n"); SD.n_windows = generate_n_windows( input, &seed ); SD.length_n_windows = input.n_nuclides; // Prepare full resonance grid printf("Generating resonance parameter grid...\n"); SD.poles = generate_poles( input, SD.n_poles, &seed, &SD.max_num_poles ); SD.length_poles = input.n_nuclides * SD.max_num_poles; // Prepare full Window grid printf("Generating window parameter grid...\n"); SD.windows = generate_window_params( input, SD.n_windows, SD.n_poles, &seed, &SD.max_num_windows); SD.length_windows = input.n_nuclides * SD.max_num_windows; // Prepare 0K Resonances printf("Generating 0K l_value data...\n"); SD.pseudo_K0RS =
generate_pseudo_K0RS( input, &seed ); SD.length_pseudo_K0RS = input.n_nuclides * input.numL; return SD; } int * generate_n_poles( Input input, uint64_t * seed ) { int total_resonances = input.avg_n_poles * input.n_nuclides; int * R = (int *) malloc( input.n_nuclides * sizeof(int)); // Ensure all nuclides have at least 1 resonance for( int i = 0; i < input.n_nuclides; i++ ) R[i] = 1; // Sample the rest for( int i = 0; i < total_resonances - input.n_nuclides; i++ ) R[LCG_random_int(seed) % input.n_nuclides]++; /* Debug for( int i = 0; i < input.n_nuclides; i++ ) printf("R[%d] = %d\n", i, R[i]); */ return R; } int * generate_n_windows( Input input, uint64_t * seed ) { int total_resonances = input.avg_n_windows * input.n_nuclides; int * R = (int *) malloc( input.n_nuclides * sizeof(int)); // Ensure all nuclides have at least 1 resonance for( int i = 0; i < input.n_nuclides; i++ ) R[i] = 1; // Sample the rest for( int i = 0; i < total_resonances - input.n_nuclides; i++ ) R[LCG_random_int(seed) % input.n_nuclides]++; /* Debug for( int i = 0; i < input.n_nuclides; i++ ) printf("R[%d] = %d\n", i, R[i]); */ return R; } Pole * generate_poles( Input input, int * n_poles, uint64_t * seed, int * max_num_poles ) { // Pole Scaling Factor -- Used to bias hitting of the fast Faddeeva // region to approximately 99.5% (i.e., only 0.5% of lookups should // require the full eval). double f = 152.5; RSComplex f_c = {f, 0}; int max_poles = -1; for( int i = 0; i < input.n_nuclides; i++ ) { if( n_poles[i] > max_poles) max_poles = n_poles[i]; } *max_num_poles = max_poles; // Allocating 2D matrix as a 1D contiguous vector Pole * R = (Pole *) malloc( input.n_nuclides * max_poles * sizeof(Pole)); // fill with data for( int i = 0; i < input.n_nuclides; i++ ) for( int j = 0; j < n_poles[i]; j++ ) { double r = LCG_random_double(seed); double im = LCG_random_double(seed); RSComplex t1 = {r, im}; R[i * max_poles + j].MP_EA = c_mul(f_c,t1); r = LCG_random_double(seed); im = LCG_random_double(seed); RSComplex t2 = {f*r, im}; R[i * max_poles + j].MP_RT = t2; r = LCG_random_double(seed); im = LCG_random_double(seed); RSComplex t3 = {f*r, im}; R[i * max_poles + j].MP_RA = t3; r = LCG_random_double(seed); im = LCG_random_double(seed); RSComplex t4 = {f*r, im}; R[i * max_poles + j].MP_RF = t4; R[i * max_poles + j].l_value = LCG_random_int(seed) % input.numL; } /* Debug for( int i = 0; i < input.n_nuclides; i++ ) for( int j = 0; j < n_poles[i]; j++ ) printf("R[%d][%d]: Eo = %lf lambda_o = %lf Tn = %lf Tg = %lf Tf = %lf\n", i, j, R[i * max_poles + j].Eo, R[i * max_poles + j].lambda_o, R[i * max_poles + j].Tn, R[i * max_poles + j].Tg, R[i * max_poles + j].Tf); */ return R; } Window * generate_window_params( Input input, int * n_windows, int * n_poles, uint64_t * seed, int * max_num_windows ) { int max_windows = -1; for( int i = 0; i < input.n_nuclides; i++ ) { if( n_windows[i] > max_windows) max_windows = n_windows[i]; } *max_num_windows = max_windows; // Allocating 2D contiguous matrix Window * R = (Window *) malloc( input.n_nuclides * max_windows * sizeof(Window)); // fill with data for( int i = 0; i < input.n_nuclides; i++ ) { int space = n_poles[i] / n_windows[i]; int remainder = n_poles[i] - space * n_windows[i]; int ctr = 0; for( int j = 0; j < n_windows[i]; j++ ) { R[i * max_windows + j].T = LCG_random_double(seed); R[i * max_windows + j].A = LCG_random_double(seed); R[i * max_windows + j].F = LCG_random_double(seed); R[i * max_windows + j].start = ctr; R[i * max_windows + j].end = ctr + space - 1; ctr += space; if ( j < 
remainder ) { ctr++; R[i * max_windows + j].end++; } } } return R; } double * generate_pseudo_K0RS( Input input, uint64_t * seed ) { double * R = (double *) malloc( input.n_nuclides * input.numL * sizeof(double)); for( int i = 0; i < input.n_nuclides; i++) for( int j = 0; j < input.numL; j++ ) R[i * input.numL + j] = LCG_random_double(seed); return R; } // io.c // Prints program logo void logo(int version) { border_print(); printf( " _____ _____ ____ _ \n" " | __ \\ / ____| _ \\ | | \n" " | |__) | (___ | |_) | ___ _ __ ___| |__ \n" " | _ / \\___ \\| _ < / _ \\ '_ \\ / __| '_ \\ \n" " | | \\ \\ ____) | |_) | __/ | | | (__| | | |\n" " |_| \\_\\_____/|____/ \\___|_| |_|\\___|_| |_|\n\n" ); border_print(); center_print("Developed at Argonne National Laboratory", 79); char v[100]; sprintf(v, "Version: %d", version); center_print(v, 79); border_print(); } // Prints Section titles in center of 80 char terminal void center_print(const char *s, int width) { int length = strlen(s); int i; for (i=0; i<=(width-length)/2; i++) { fputs(" ", stdout); } fputs(s, stdout); fputs("\n", stdout); } void border_print(void) { printf( "===================================================================" "=============\n"); } // Prints comma separated integers - for ease of reading void fancy_int( int a ) { if( a < 1000 ) printf("%d\n",a); else if( a >= 1000 && a < 1000000 ) printf("%d,%03d\n", a / 1000, a % 1000); else if( a >= 1000000 && a < 1000000000 ) printf("%d,%03d,%03d\n", a / 1000000, (a % 1000000) / 1000, a % 1000 ); else if( a >= 1000000000 ) printf("%d,%03d,%03d,%03d\n", a / 1000000000, (a % 1000000000) / 1000000, (a % 1000000) / 1000, a % 1000 ); else printf("%d\n",a); } Input read_CLI( int argc, char * argv[] ) { Input input; // defaults to the history based simulation method input.simulation_method = HISTORY_BASED; // defaults to max threads on the system input.nthreads = omp_get_num_procs(); // defaults to 355 (corresponding to H-M Large benchmark) input.n_nuclides = 355; // defaults to 300,000 input.particles = 300000; // defaults to 34 input.lookups = 34; // defaults to H-M Large benchmark input.HM = LARGE; // defaults to 3000 resonancs (avg) per nuclide input.avg_n_poles = 1000; // defaults to 100 input.avg_n_windows = 100; // defaults to 4; input.numL = 4; // defaults to no temperature dependence (Doppler broadening) input.doppler = 1; // defaults to baseline simulation kernel input.kernel_id = 0; int default_lookups = 1; int default_particles = 1; // Collect Raw Input for( int i = 1; i < argc; i++ ) { char * arg = argv[i]; // nthreads (-t) if( strcmp(arg, "-t") == 0 ) { if( ++i < argc ) input.nthreads = atoi(argv[i]); else print_CLI_error(); } // Simulation Method (-m) else if( strcmp(arg, "-m") == 0 ) { char * sim_type; if( ++i < argc ) sim_type = argv[i]; else print_CLI_error(); if( strcmp(sim_type, "history") == 0 ) input.simulation_method = HISTORY_BASED; else if( strcmp(sim_type, "event") == 0 ) { input.simulation_method = EVENT_BASED; // Also resets default # of lookups if( default_lookups && default_particles ) { input.lookups = input.lookups * input.particles; input.particles = 0; } } else print_CLI_error(); } // lookups (-l) else if( strcmp(arg, "-l") == 0 ) { if( ++i < argc ) { input.lookups = atoi(argv[i]); default_lookups = 0; } else print_CLI_error(); } // particles (-p) else if( strcmp(arg, "-p") == 0 ) { if( ++i < argc ) { input.particles = atoi(argv[i]); default_particles = 0; } else print_CLI_error(); } // nuclides (-n) else if( strcmp(arg, "-n") == 0 ) { if( ++i < argc ) 
input.n_nuclides = atoi(argv[i]); else print_CLI_error(); } // HM (-s) else if( strcmp(arg, "-s") == 0 ) { if( ++i < argc ) { if( strcmp(argv[i], "small") == 0 ) input.HM = SMALL; else if ( strcmp(argv[i], "large") == 0 ) input.HM = LARGE; else print_CLI_error(); } else print_CLI_error(); } // Doppler Broadening (Temperature Dependence) else if( strcmp(arg, "-d") == 0 ) { input.doppler = 0; } // Avg number of windows per nuclide (-w) else if( strcmp(arg, "-W") == 0 ) { if( ++i < argc ) input.avg_n_windows = atoi(argv[i]); else print_CLI_error(); } // Avg number of poles per nuclide (-p) else if( strcmp(arg, "-P") == 0 ) { if( ++i < argc ) input.avg_n_poles = atoi(argv[i]); else print_CLI_error(); } // Kernel ID (-k) else if( strcmp(arg, "-k") == 0 ) { if( ++i < argc ) input.kernel_id = atoi(argv[i]); else print_CLI_error(); } else print_CLI_error(); } // Validate Input // Validate nthreads if( input.nthreads < 1 ) print_CLI_error(); // Validate n_isotopes if( input.n_nuclides < 1 ) print_CLI_error(); // Validate lookups if( input.lookups < 1 ) print_CLI_error(); // Validate lookups if( input.avg_n_poles < 1 ) print_CLI_error(); // Validate lookups if( input.avg_n_windows < 1 ) print_CLI_error(); // Set HM size specific parameters // (defaults to large) if( input.HM == SMALL ) input.n_nuclides = 68; // Return input struct return input; } void print_CLI_error(void) { printf("Usage: ./multibench <options>\n"); printf("Options include:\n"); printf(" -t <threads> Number of OpenMP threads to run\n"); printf(" -m <simulation method> Simulation method (history, event)\n"); printf(" -s <size> Size of H-M Benchmark to run (small, large)\n"); printf(" -l <lookups> Number of Cross-section (XS) lookups per particle history\n"); printf(" -p <particles> Number of particle histories\n"); printf(" -P <poles> Average Number of Poles per Nuclide\n"); printf(" -W <poles> Average Number of Windows per Nuclide\n"); printf(" -d Disables Temperature Dependence (Doppler Broadening)\n"); printf("Default is equivalent to: -s large -m history -l 34 -p 300000 -P 1000 -W 100\n"); printf("See readme for full description of default run values\n"); exit(4); } void print_input_summary(Input input) { // Calculate Estimate of Memory Usage size_t mem = get_mem_estimate(input); if( input.simulation_method == EVENT_BASED ) printf("Simulation Method: Event Based\n"); else printf("Simulation Method: History Based\n"); printf("Materials: 12\n"); printf("H-M Benchmark Size: "); if( input.HM == 0 ) printf("Small\n"); else printf("Large\n"); if( input.doppler == 1 ) printf("Temperature Dependence: ON\n"); else printf("Temperature Dependence: OFF\n"); printf("Total Nuclides: %d\n", input.n_nuclides); printf("Avg Poles per Nuclide: "); fancy_int(input.avg_n_poles); printf("Avg Windows per Nuclide: "); fancy_int(input.avg_n_windows); int lookups = input.lookups; if( input.simulation_method == HISTORY_BASED ) { printf("Particles: "); fancy_int(input.particles); printf("XS Lookups per Particle: "); fancy_int(input.lookups); lookups *= input.particles; } printf("Total XS Lookups: "); fancy_int(lookups); printf("Threads: %d\n", input.nthreads); printf("Est. 
Memory Usage (MB): %.1lf\n", mem / 1024.0 / 1024.0); } int validate_and_print_results(Input input, double runtime, unsigned long vhash) { printf("Threads: %d\n", input.nthreads); printf("Runtime: %.3lf seconds\n", runtime); int lookups = 0; if( input.simulation_method == HISTORY_BASED ) lookups = input.lookups*input.particles; else lookups = input.lookups; printf("Lookups: "); fancy_int(lookups); printf("Lookups/s: "); fancy_int((double) lookups / (runtime)); int is_invalid = 1; unsigned long long large = 0; unsigned long long small = 0; if(input.simulation_method == HISTORY_BASED ) { large = 351485; small = 879693; } else if( input.simulation_method == EVENT_BASED ) { large = 358389; small = 880018; } if( input.HM == LARGE ) { if( vhash == large ) { printf("Verification checksum: %lu (Valid)\n", vhash); is_invalid = 0; } else printf("Verification checksum: %lu (WARNING - INAVALID CHECKSUM!)\n", vhash); } else if( input.HM == SMALL ) { if( vhash == small ) { printf("Verification checksum: %lu (Valid)\n", vhash); is_invalid = 0; } else printf("Verification checksum: %lu (WARNING - INAVALID CHECKSUM!)\n", vhash); } return is_invalid; } // simulation.c //////////////////////////////////////////////////////////////////////////////////// // BASELINE FUNCTIONS //////////////////////////////////////////////////////////////////////////////////// // All "baseline" code is at the top of this file. The baseline code is a simple // implementation of the algorithm, with only minor CPU optimizations in place. // Following these functions are a number of optimized variants, // which each deploy a different combination of optimizations strategies. By // default, RSBench will only run the baseline implementation. Optimized variants // must be specifically selected using the "-k <optimized variant ID>" command // line argument. //////////////////////////////////////////////////////////////////////////////////// void run_event_based_simulation(Input input, SimulationData data, unsigned long * vhash_result ) { printf("Beginning baseline event based simulation...\n"); unsigned long verification = 0; // Main simulation loop over macroscopic cross section lookups #pragma omp parallel for schedule(dynamic, 1000) default(none) shared(input, data) reduction(+:verification) for( int i = 0; i < input.lookups; i++ ) { // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup) seed = fast_forward_LCG(seed, 2*i); // Randomly pick an energy and material for the particle double E = LCG_random_double(&seed); int mat = pick_mat(&seed); double macro_xs[4] = {0}; calculate_macro_xs( macro_xs, mat, E, input, data ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we prevent thread // contention by using an OMP reduction on it. 
For other accelerators, // a different approach might be required (e.g., atomics, reduction // of thread-specific values in large array via CUDA thrust, etc) double max = -DBL_MAX; int max_idx = 0; for(int x = 0; x < 4; x++ ) { if( macro_xs[x] > max ) { max = macro_xs[x]; max_idx = x; } } verification += max_idx+1; } *vhash_result = verification; } void run_history_based_simulation(Input input, SimulationData data, unsigned long * vhash_result ) { printf("Beginning history based simulation...\n"); unsigned long verification = 0; // Main simulation loop over particle histories #pragma omp parallel for schedule(dynamic, 1000) default(none) shared(input, data) reduction(+:verification) for( int p = 0; p < input.particles; p++ ) { // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup) seed = fast_forward_LCG(seed, p * input.lookups * 2 * 4); // Randomly pick an energy and material for the particle double E = LCG_random_double(&seed); int mat = pick_mat(&seed); // Loop over macroscopic cross section events. This loop is dependent! // I.e., This loop must be executed sequentially, // as each lookup depends on results from the previous lookup. for( int i = 0; i < input.lookups; i++ ) { double macro_xs[4] = {0}; calculate_macro_xs( macro_xs, mat, E, input, data ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we prevent thread // contention by using an OMP reduction on it. For other accelerators, // a different approach might be required (e.g., atomics, reduction // of thread-specific values in large array via CUDA thrust, etc) double max = -DBL_MAX; int max_idx = 0; for(int x = 0; x < 4; x++ ) { if( macro_xs[x] > max ) { max = macro_xs[x]; max_idx = x; } } verification += max_idx+1; // Randomly pick next energy and material for the particle // Also incorporates results from macro_xs lookup to // enforce loop dependency. 
// In a real MC app, this dependency is expressed in terms // of branching physics sampling, whereas here we are just // artificially enforcing this dependence based on altering // the seed uint64_t n_forward = 0; for( int x = 0; x < 4; x++ ) if( macro_xs[x] > 1.0 ) n_forward++; if( n_forward > 0 ) seed = fast_forward_LCG(seed, n_forward); E = LCG_random_double(&seed); mat = pick_mat(&seed); } } *vhash_result = verification; } void calculate_macro_xs( double * macro_xs, int mat, double E, Input input, SimulationData data ) { // zero out macro vector for( int i = 0; i < 4; i++ ) macro_xs[i] = 0; // for nuclide in mat for( int i = 0; i < data.num_nucs[mat]; i++ ) { double micro_xs[4]; int nuc = data.mats[mat * data.max_num_nucs + i]; if( input.doppler == 1 ) calculate_micro_xs_doppler( micro_xs, nuc, E, input, data ); else calculate_micro_xs( micro_xs, nuc, E, input, data); for( int j = 0; j < 4; j++ ) { macro_xs[j] += micro_xs[j] * data.concs[mat * data.max_num_nucs + i]; } // Debug /* printf("E = %.2lf, mat = %d, macro_xs[0] = %.2lf, macro_xs[1] = %.2lf, macro_xs[2] = %.2lf, macro_xs[3] = %.2lf\n", E, mat, macro_xs[0], macro_xs[1], macro_xs[2], macro_xs[3] ); */ } // Debug /* printf("E = %.2lf, mat = %d, macro_xs[0] = %.2lf, macro_xs[1] = %.2lf, macro_xs[2] = %.2lf, macro_xs[3] = %.2lf\n", E, mat, macro_xs[0], macro_xs[1], macro_xs[2], macro_xs[3] ); */ } // No Temperature dependence (i.e., 0K evaluation) void calculate_micro_xs( double * micro_xs, int nuc, double E, Input input, SimulationData data) { // MicroScopic XS's to Calculate double sigT; double sigA; double sigF; double sigE; // Calculate Window Index double spacing = 1.0 / data.n_windows[nuc]; int window = (int) ( E / spacing ); if( window == data.n_windows[nuc] ) window--; // Calculate sigTfactors RSComplex sigTfactors[4]; // Of length input.numL, which is always 4 calculate_sig_T(nuc, E, input, data, sigTfactors ); // Calculate contributions from window "background" (i.e., poles outside window (pre-calculated) Window w = data.windows[nuc * data.max_num_windows + window]; sigT = E * w.T; sigA = E * w.A; sigF = E * w.F; // Loop over Poles within window, add contributions for( int i = w.start; i < w.end; i++ ) { RSComplex PSIIKI; RSComplex CDUM; Pole pole = data.poles[nuc * data.max_num_poles + i]; RSComplex t1 = {0, 1}; RSComplex t2 = {sqrt(E), 0 }; PSIIKI = c_div( t1 , c_sub(pole.MP_EA,t2) ); RSComplex E_c = {E, 0}; CDUM = c_div(PSIIKI, E_c); sigT += (c_mul(pole.MP_RT, c_mul(CDUM, sigTfactors[pole.l_value])) ).r; sigA += (c_mul( pole.MP_RA, CDUM)).r; sigF += (c_mul(pole.MP_RF, CDUM)).r; } sigE = sigT - sigA; micro_xs[0] = sigT; micro_xs[1] = sigA; micro_xs[2] = sigF; micro_xs[3] = sigE; } // Temperature Dependent Variation of Kernel // (This involves using the Complex Faddeeva function to // Doppler broaden the poles within the window) void calculate_micro_xs_doppler( double * micro_xs, int nuc, double E, Input input, SimulationData data ) { // MicroScopic XS's to Calculate double sigT; double sigA; double sigF; double sigE; // Calculate Window Index double spacing = 1.0 / data.n_windows[nuc]; int window = (int) ( E / spacing ); if( window == data.n_windows[nuc] ) window--; // Calculate sigTfactors RSComplex sigTfactors[4]; // Of length input.numL, which is always 4 calculate_sig_T(nuc, E, input, data, sigTfactors ); // Calculate contributions from window "background" (i.e., poles outside window (pre-calculated) Window w = data.windows[nuc * data.max_num_windows + window]; sigT = E * w.T; sigA = E * w.A; sigF = E * w.F; double 
dopp = 0.5; // Loop over Poles within window, add contributions for( int i = w.start; i < w.end; i++ ) { Pole pole = data.poles[nuc * data.max_num_poles + i]; // Prep Z RSComplex E_c = {E, 0}; RSComplex dopp_c = {dopp, 0}; RSComplex Z = c_mul(c_sub(E_c, pole.MP_EA), dopp_c); // Evaluate Fadeeva Function RSComplex faddeeva = fast_nuclear_W( Z ); // Update W sigT += (c_mul( pole.MP_RT, c_mul(faddeeva, sigTfactors[pole.l_value]) )).r; sigA += (c_mul( pole.MP_RA , faddeeva)).r; sigF += (c_mul( pole.MP_RF , faddeeva)).r; } sigE = sigT - sigA; micro_xs[0] = sigT; micro_xs[1] = sigA; micro_xs[2] = sigF; micro_xs[3] = sigE; } void calculate_sig_T( int nuc, double E, Input input, SimulationData data, RSComplex * sigTfactors ) { double phi; for( int i = 0; i < 4; i++ ) { phi = data.pseudo_K0RS[nuc * input.numL + i] * sqrt(E); if( i == 1 ) phi -= - atan( phi ); else if( i == 2 ) phi -= atan( 3.0 * phi / (3.0 - phi*phi)); else if( i == 3 ) phi -= atan(phi*(15.0-phi*phi)/(15.0-6.0*phi*phi)); phi *= 2.0; sigTfactors[i].r = cos(phi); sigTfactors[i].i = -sin(phi); } } // This function uses a combination of the Abrarov Approximation // and the QUICK_W three term asymptotic expansion. // Only expected to use Abrarov ~0.5% of the time. RSComplex fast_nuclear_W( RSComplex Z ) { // Abrarov if( c_abs(Z) < 6.0 ) { // Precomputed parts for speeding things up // (N = 10, Tm = 12.0) RSComplex prefactor = {0, 8.124330e+01}; double an[10] = { 2.758402e-01, 2.245740e-01, 1.594149e-01, 9.866577e-02, 5.324414e-02, 2.505215e-02, 1.027747e-02, 3.676164e-03, 1.146494e-03, 3.117570e-04 }; double neg_1n[10] = { -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0 }; double denominator_left[10] = { 9.869604e+00, 3.947842e+01, 8.882644e+01, 1.579137e+02, 2.467401e+02, 3.553058e+02, 4.836106e+02, 6.316547e+02, 7.994380e+02, 9.869604e+02 }; RSComplex t1 = {0, 12}; RSComplex t2 = {12, 0}; RSComplex i = {0,1}; RSComplex one = {1, 0}; RSComplex W = c_div(c_mul(i, ( c_sub(one, fast_cexp(c_mul(t1, Z))) )) , c_mul(t2, Z)); RSComplex sum = {0,0}; for( int n = 0; n < 10; n++ ) { RSComplex t3 = {neg_1n[n], 0}; RSComplex top = c_sub(c_mul(t3, fast_cexp(c_mul(t1, Z))), one); RSComplex t4 = {denominator_left[n], 0}; RSComplex t5 = {144, 0}; RSComplex bot = c_sub(t4, c_mul(t5,c_mul(Z,Z))); RSComplex t6 = {an[n], 0}; sum = c_add(sum, c_mul(t6, c_div(top,bot))); } W = c_add(W, c_mul(prefactor, c_mul(Z, sum))); return W; } else { // QUICK_2 3 Term Asymptotic Expansion (Accurate to O(1e-6)). 
// Pre-computed parameters RSComplex a = {0.512424224754768462984202823134979415014943561548661637413182,0}; RSComplex b = {0.275255128608410950901357962647054304017026259671664935783653, 0}; RSComplex c = {0.051765358792987823963876628425793170829107067780337219430904, 0}; RSComplex d = {2.724744871391589049098642037352945695982973740328335064216346, 0}; RSComplex i = {0,1}; RSComplex Z2 = c_mul(Z, Z); // Three Term Asymptotic Expansion RSComplex W = c_mul(c_mul(Z,i), (c_add(c_div(a,(c_sub(Z2, b))) , c_div(c,(c_sub(Z2, d)))))); return W; } } double LCG_random_double(uint64_t * seed) { const uint64_t m = 9223372036854775808ULL; // 2^63 const uint64_t a = 2806196910506780709ULL; const uint64_t c = 1ULL; *seed = (a * (*seed) + c) % m; return (double) (*seed) / (double) m; } uint64_t LCG_random_int(uint64_t * seed) { const uint64_t m = 9223372036854775808ULL; // 2^63 const uint64_t a = 2806196910506780709ULL; const uint64_t c = 1ULL; *seed = (a * (*seed) + c) % m; return *seed; } uint64_t fast_forward_LCG(uint64_t seed, uint64_t n) { const uint64_t m = 9223372036854775808ULL; // 2^63 uint64_t a = 2806196910506780709ULL; uint64_t c = 1ULL; n = n % m; uint64_t a_new = 1; uint64_t c_new = 0; while(n > 0) { if(n & 1) { a_new *= a; c_new = c_new * a + c; } c *= (a + 1); a *= a; n >>= 1; } return (a_new * seed + c_new) % m; } // Complex arithmetic functions RSComplex c_add( RSComplex A, RSComplex B) { RSComplex C; C.r = A.r + B.r; C.i = A.i + B.i; return C; } RSComplex c_sub( RSComplex A, RSComplex B) { RSComplex C; C.r = A.r - B.r; C.i = A.i - B.i; return C; } RSComplex c_mul( RSComplex A, RSComplex B) { double a = A.r; double b = A.i; double c = B.r; double d = B.i; RSComplex C; C.r = (a*c) - (b*d); C.i = (a*d) + (b*c); return C; } RSComplex c_div( RSComplex A, RSComplex B) { double a = A.r; double b = A.i; double c = B.r; double d = B.i; RSComplex C; double denom = c*c + d*d; C.r = ( (a*c) + (b*d) ) / denom; C.i = ( (b*c) - (a*d) ) / denom; return C; } double c_abs( RSComplex A) { return sqrt(A.r*A.r + A.i * A.i); } // Fast (but inaccurate) exponential function // Written By "ACMer": // https://codingforspeed.com/using-faster-exponential-approximation/ // We use our own to avoid small differences in compiler specific // exp() intrinsic implementations that make it difficult to verify // if the code is working correctly or not. 
double fast_exp(double x) { x = 1.0 + x * 0.000244140625; x *= x; x *= x; x *= x; x *= x; x *= x; x *= x; x *= x; x *= x; x *= x; x *= x; x *= x; x *= x; return x; } // Implementation based on: // z = x + iy // cexp(z) = e^x * (cos(y) + i * sin(y)) RSComplex fast_cexp( RSComplex z ) { double x = z.r; double y = z.i; // For consistency across architectures, we // will use our own exponetial implementation //double t1 = exp(x); double t1 = fast_exp(x); double t2 = cos(y); double t3 = sin(y); RSComplex t4 = {t2, t3}; RSComplex t5 = {t1, 0}; RSComplex result = c_mul(t5, (t4)); return result; } //////////////////////////////////////////////////////////////////////////////////// // Parallel Quicksort Key-Value Sorting Algorithms //////////////////////////////////////////////////////////////////////////////////// // // These algorithms are based on the parallel quicksort implementation by // Eduard Lopez published at https://github.com/eduardlopez/quicksort-parallel // // Eduard's original version was for an integer type quicksort, but I have modified // it to form two different versions that can sort key-value pairs together without // having to bundle them into a separate object. Additionally, I have modified the // optimal chunk sizes and restricted the number of threads for the array sizing // that XSBench will be using by default. // // Eduard's original implementation carries the following license, which applies to // the following functions only: // // void quickSort_parallel_internal_i_d(int* key,double * value, int left, int right, int cutoff) // void quickSort_parallel_i_d(int* key,double * value, int lenArray, int numThreads) // void quickSort_parallel_internal_d_i(double* key,int * value, int left, int right, int cutoff) // void quickSort_parallel_d_i(double* key,int * value, int lenArray, int numThreads) // // The MIT License (MIT) // // Copyright (c) 2016 Eduard López // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
// //////////////////////////////////////////////////////////////////////////////////// void quickSort_parallel_internal_i_d(int* key,double * value, int left, int right, int cutoff) { int i = left, j = right; int tmp; int pivot = key[(left + right) / 2]; { while (i <= j) { while (key[i] < pivot) i++; while (key[j] > pivot) j--; if (i <= j) { tmp = key[i]; key[i] = key[j]; key[j] = tmp; double tmp_v = value[i]; value[i] = value[j]; value[j] = tmp_v; i++; j--; } } } if ( ((right-left)<cutoff) ){ if (left < j){ quickSort_parallel_internal_i_d(key, value, left, j, cutoff); } if (i < right){ quickSort_parallel_internal_i_d(key, value, i, right, cutoff); } }else{ #pragma omp task { quickSort_parallel_internal_i_d(key, value, left, j, cutoff); } #pragma omp task { quickSort_parallel_internal_i_d(key, value, i, right, cutoff); } } } void quickSort_parallel_i_d(int* key,double * value, int lenArray, int numThreads){ // Set minumum problem size to still spawn threads for int cutoff = 10000; // For this problem size, more than 16 threads on CPU is not helpful if( numThreads > 16 ) numThreads = 16; #pragma omp parallel num_threads(numThreads) { #pragma omp single nowait { quickSort_parallel_internal_i_d(key,value, 0, lenArray-1, cutoff); } } } void quickSort_parallel_internal_d_i(double* key,int * value, int left, int right, int cutoff) { int i = left, j = right; double tmp; double pivot = key[(left + right) / 2]; { while (i <= j) { while (key[i] < pivot) i++; while (key[j] > pivot) j--; if (i <= j) { tmp = key[i]; key[i] = key[j]; key[j] = tmp; int tmp_v = value[i]; value[i] = value[j]; value[j] = tmp_v; i++; j--; } } } if ( ((right-left)<cutoff) ){ if (left < j){ quickSort_parallel_internal_d_i(key, value, left, j, cutoff); } if (i < right){ quickSort_parallel_internal_d_i(key, value, i, right, cutoff); } }else{ #pragma omp task { quickSort_parallel_internal_d_i(key, value, left, j, cutoff); } #pragma omp task { quickSort_parallel_internal_d_i(key, value, i, right, cutoff); } } } void quickSort_parallel_d_i(double* key,int * value, int lenArray, int numThreads){ // Set minumum problem size to still spawn threads for int cutoff = 10000; // For this problem size, more than 16 threads on CPU is not helpful if( numThreads > 16 ) numThreads = 16; #pragma omp parallel num_threads(numThreads) { #pragma omp single nowait { quickSort_parallel_internal_d_i(key,value, 0, lenArray-1, cutoff); } } } //////////////////////////////////////////////////////////////////////////////////// // Optimization 1 -- Event-based Sample/XS Lookup kernel splitting + Sorting // lookups by material and energy //////////////////////////////////////////////////////////////////////////////////// // This kernel separates out the sampling and lookup regions of the event-based // model, and then sorts the lookups by material type and energy. The goal of this // optimization is to allow for greatly improved cache locality, and XS indices // loaded from memory may be re-used for multiple lookups. // // As efficienct sorting is key for performance, we also must implement an // efficient key-value parallel sorting algorithm. We also experimented with using // the C++ version of thrust for these purposes, but found that our own implemtation // was slightly faster than the thrust library version, so for speed and // simplicity we will do not add the thrust dependency. 
//////////////////////////////////////////////////////////////////////////////////// void run_event_based_simulation_optimization_1(Input in, SimulationData SD, unsigned long * vhash_result ) { char * optimization_name = "Optimization 1 - Kernel splitting + full material & energy sort"; printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// printf("Allocating additional data required by optimized kernel...\n"); size_t sz; size_t total_sz = 0; double start, stop; sz = in.lookups * sizeof(double); SD.p_energy_samples = (double *) malloc(sz); total_sz += sz; SD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); SD.mat_samples = (int *) malloc(sz); total_sz += sz; SD.length_mat_samples = in.lookups; printf("Allocated an additional %.0lf MB of data on CPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Begin Actual Simulation //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Sample Materials and Energies //////////////////////////////////////////////////////////////////////////////// printf("Sampling event data...\n"); #pragma omp parallel for schedule(dynamic, 1000) for( int i = 0; i < in.lookups; i++ ) { // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup) seed = fast_forward_LCG(seed, 2*i); // Randomly pick an energy and material for the particle double p_energy = LCG_random_double(&seed); int mat = pick_mat(&seed); SD.p_energy_samples[i] = p_energy; SD.mat_samples[i] = mat; } printf("Finished sampling.\n"); //////////////////////////////////////////////////////////////////////////////// // Sort by Material //////////////////////////////////////////////////////////////////////////////// start = get_time(); quickSort_parallel_i_d(SD.mat_samples, SD.p_energy_samples, in.lookups, in.nthreads); stop = get_time(); printf("Material sort took %.3lf seconds\n", stop-start); //////////////////////////////////////////////////////////////////////////////// // Sort by Energy //////////////////////////////////////////////////////////////////////////////// start = get_time(); // Count up number of each type of sample. 
int num_samples_per_mat[12] = {0}; for( int l = 0; l < in.lookups; l++ ) num_samples_per_mat[ SD.mat_samples[l] ]++; // Determine offsets int offsets[12] = {0}; for( int m = 1; m < 12; m++ ) offsets[m] = offsets[m-1] + num_samples_per_mat[m-1]; stop = get_time(); printf("Counting samples and offsets took %.3lf seconds\n", stop-start); start = stop; // Sort each material type by energy level int offset = 0; for( int m = 0; m < 12; m++ ) quickSort_parallel_d_i(SD.p_energy_samples + offsets[m],SD.mat_samples + offsets[m], num_samples_per_mat[m], in.nthreads); stop = get_time(); printf("Energy Sorts took %.3lf seconds\n", stop-start); //////////////////////////////////////////////////////////////////////////////// // Perform lookups for each material separately //////////////////////////////////////////////////////////////////////////////// start = get_time(); unsigned long long verification = 0; // Individual Materials offset = 0; for( int m = 0; m < 12; m++ ) { #pragma omp parallel for schedule(dynamic,100) reduction(+:verification) for( int i = offset; i < offset + num_samples_per_mat[m]; i++) { // load pre-sampled energy and material for the particle double E = SD.p_energy_samples[i]; int mat = SD.mat_samples[i]; double macro_xs_vector[4] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( macro_xs_vector, mat, E, in, SD ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we prevent thread // contention by using an OMP reduction on the verification value. // For accelerators, a different approach might be required // (e.g., atomics, reduction of thread-specific values in large // array via CUDA thrust, etc). double max = -DBL_MAX; int max_idx = 0; for(int j = 0; j < 4; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } verification += max_idx+1; } offset += num_samples_per_mat[m]; } stop = get_time(); printf("XS Lookups took %.3lf seconds\n", stop-start); *vhash_result = verification; }
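/*
 * Note on the RNG skip-ahead used above: both the event-based and the
 * history-based kernels jump each lookup's LCG stream to its starting
 * position with fast_forward_LCG() instead of stepping the generator
 * sequentially. The standalone check below (compile separately; the
 * driver, seed, and step count are illustrative only) restates the LCG
 * constants from LCG_random_int()/fast_forward_LCG() and verifies that
 * n sequential steps and one O(log n) fast-forward land on the same seed.
 */
#include <stdio.h>
#include <stdint.h>

/* Same LCG as LCG_random_int() above: seed' = (a*seed + c) mod 2^63. */
static uint64_t lcg_step(uint64_t seed)
{
    const uint64_t m = 9223372036854775808ULL; /* 2^63 */
    const uint64_t a = 2806196910506780709ULL;
    const uint64_t c = 1ULL;
    return (a * seed + c) % m;
}

/* Same doubling-based skip-ahead as fast_forward_LCG() above. */
static uint64_t lcg_skip(uint64_t seed, uint64_t n)
{
    const uint64_t m = 9223372036854775808ULL; /* 2^63 */
    uint64_t a = 2806196910506780709ULL;
    uint64_t c = 1ULL;
    uint64_t a_new = 1, c_new = 0;
    while (n > 0)
    {
        if (n & 1)
        {
            a_new *= a;
            c_new = c_new * a + c;
        }
        c *= (a + 1);
        a *= a;
        n >>= 1;
    }
    return (a_new * seed + c_new) % m;
}

int main(void)
{
    uint64_t seed = 42;        /* arbitrary illustrative seed */
    const uint64_t n = 1000;   /* e.g., 2*i for lookup index i = 500 */
    uint64_t stepped = seed;

    for (uint64_t k = 0; k < n; k++)
        stepped = lcg_step(stepped);

    uint64_t skipped = lcg_skip(seed, n);
    printf("sequential: %llu  fast-forward: %llu  %s\n",
           (unsigned long long) stepped,
           (unsigned long long) skipped,
           (stepped == skipped) ? "(match)" : "(MISMATCH)");
    return 0;
}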
pw_multithread.c
#include <omp.h>
#include <papi_wrapper.h>
#include <stdio.h>
#include <stdlib.h>

#include "test_lib.h"

int main()
{
    long long N = 10;
    double x[N];

    pw_init_instruments;

#pragma omp parallel
    {
        pw_start_instruments_loop(omp_get_thread_num());
        for (int i = 0; i < N; ++i)
        {
            x[i] = i * 42.3;
            x[i] = i / 29.8;
        }
        pw_stop_instruments_loop(omp_get_thread_num());
    }

    pw_print_instruments;

    /* avoid code elimination */
    for (int i = 0; i < N; ++i)
    {
        if (i % 1000000 == 0)
        {
            printf("x[%d]\t%f\n", i, x[i]);
        }
    }

    return pw_test_pass(__FILE__);
}
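/*
 * The test above brackets the per-thread loop with papi_wrapper's
 * pw_start/stop_instruments_loop macros. As a rough, hedged sketch of what
 * that instrumented region covers, the standalone variant below times the
 * same loop with plain omp_get_wtime() per thread; it does not use or
 * imitate the papi_wrapper API, and the 64-entry timing array is an
 * assumption of this sketch (at most 64 threads), not part of the test.
 */
#include <omp.h>
#include <stdio.h>

int main(void)
{
    long long N = 10;
    double x[N];
    double elapsed[64] = {0};   /* assumes <= 64 threads for this sketch */

    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        double t0 = omp_get_wtime();
        /* each thread redundantly runs the same small loop, as in the test */
        for (int i = 0; i < N; ++i)
        {
            x[i] = i * 42.3;
            x[i] = i / 29.8;
        }
        if (tid < 64)
            elapsed[tid] = omp_get_wtime() - t0;
    }

    for (int t = 0; t < omp_get_max_threads() && t < 64; ++t)
        printf("thread %d: %.9f s\n", t, elapsed[t]);

    /* avoid code elimination */
    printf("x[0] = %f\n", x[0]);
    return 0;
}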
version1_3.c
// Compile with: // // // To specify the number of bodies in the world, the program optionally accepts // an integer as its first command line argument. #include <time.h> #include <sys/times.h> #include <math.h> #include <string.h> #include <stdlib.h> #include <stdio.h> #include <X11/Xlib.h> #include <unistd.h> #include "omp.h" #define WIDTH 1024 #define HEIGHT 768 // default number of bodies #define DEF_NUM_BODIES 100 // gravitational constant #define GRAV 10.0 // initial velocities are scaled by this value #define V_SCALAR 20.0 // initial masses are scaled by this value #define M_SCALAR 5.0 // radius scalar #define R_SCALAR 3 // coefficient of restitution determines the elasticity of a collision: C_REST = [0,1] // if C_REST = 0 -> perfectly inelastic (particles stick together) // if C_REST = 1 -> perfectly elastic (no loss of speed) #define C_REST 0.5 // set the iteration times #define iteration_times 100 // Must set 0 if run on Pi #define NOT_RUN_ON_PI 1 struct body { double x, y; // position double m; // mass }; struct speed{ double vx,vy; }; struct world { struct body *bodies; int num_bodies; }; struct speed* speed; double * radius; clock_t total_time = 0; //total_time.sec = 0; //total_time.usec = 0; int BLOCK_NUM=8; /* This function initializes each particle's mass, velocity and position */ struct world *create_world(int num_bodies) { struct world *world = malloc(sizeof(struct world)); speed =malloc(sizeof(struct speed)*num_bodies); radius=malloc(sizeof(double)*num_bodies); world->num_bodies = num_bodies; world->bodies = malloc(sizeof(struct body) * num_bodies); int i = 0; double x; double y; double rc; int min_dim = (WIDTH < HEIGHT) ? WIDTH : HEIGHT; while (i < num_bodies) { x = drand48() * WIDTH; y = drand48() * HEIGHT; rc = sqrt((WIDTH / 2 - x) * (WIDTH / 2 - x) + (y - HEIGHT / 2) * (y - HEIGHT / 2)); if (rc <= min_dim / 2) { world->bodies[i].x = x; world->bodies[i].y = y; speed[i].vx = V_SCALAR * (y - HEIGHT / 2) / rc; speed[i].vy = V_SCALAR * (WIDTH / 2 - x) / rc; world->bodies[i].m = (1 / (0.025 + drand48())) * M_SCALAR; radius[i] = sqrt(world->bodies[i].m / M_PI) * R_SCALAR; i++; } } return world; } // set the foreground color given RGB values between 0..255. 
void set_color(Display *disp, GC gc, int r, int g, int b) { unsigned long int p; if (r < 0) r = 0; else if (r > 255) r = 255; if (g < 0) g = 0; else if (g > 255) g = 255; if (b < 0) b = 0; else if (b > 255) b = 255; p = (r << 16) | (g << 8) | (b); XSetForeground(disp, gc, p); } /* This function updates the screen with the new positions of each particle */ void draw_world(Display *disp, Pixmap back_buf, GC gc, struct world *world) { int i; double x, y, r, r2; // we turn off aliasing for faster draws set_color(disp, gc, 255, 255, 255); XFillRectangle(disp, back_buf, gc, 0, 0, WIDTH, HEIGHT); for (i = 0; i < world->num_bodies; i++) { r = radius[i]; x = world->bodies[i].x - r; y = world->bodies[i].y - r; r2 = r + r; // draw body set_color(disp, gc, 255 * 7 / 10, 255 * 7 / 10, 255 * 7 / 10); XFillArc(disp, back_buf, gc, x, y, r2, r2, 0, 360 * 64); set_color(disp, gc, 0, 0, 0); XDrawArc(disp, back_buf, gc, x, y, r2, r2, 0, 360 * 64); } } void collision_step(struct world *world) { int a, b; double r, x, y, vx, vy; // Impose screen boundaries by reversing direction if body is off screen for (a = 0; a < world->num_bodies; a++) { r = radius[a]; x = world->bodies[a].x; y = world->bodies[a].y; vx = speed[a].vx; vy = speed[a].vy; if (x - r < 0) { // left edge if (vx < 0) { speed[a].vx = -C_REST * vx; } world->bodies[a].x = r; } else if (x + r > WIDTH) { // right edge if (vx > 0) { speed[a].vx = -C_REST * vx; } world->bodies[a].x = WIDTH - r; } if (y - r < 0) { // bottom edge if (vy < 0) { speed[a].vy = -C_REST * vy; } world->bodies[a].y = r; } else if (y + r > HEIGHT) { // top edge if (vy > 0) { speed[a].vy = -C_REST * vy; } world->bodies[a].y = HEIGHT - r; } } } void position_step(struct world *world, double time_res) { /* The forces array stores the x and y components of the total force acting * on each body. 
The forces are index like this: * F on body i in the x dir = F_x[i] * F on body i in the y dir = F_y[i] */ double *force_x = (double *) malloc(sizeof(double) * world->num_bodies); double *force_y = (double *) malloc(sizeof(double) * world->num_bodies); // initialize all forces to zero force_x = memset(force_x, 0, sizeof(double) * world->num_bodies); force_y = memset(force_y, 0, sizeof(double) * world->num_bodies); /* Compute the net force on each body */ #pragma omp parallel num_threads(8) { int BLOCK_SIZE = world->num_bodies / BLOCK_NUM; int my_rank = omp_get_thread_num(); int num_of_threads = omp_get_num_threads(); int thread_size = world->num_bodies / num_of_threads; for (int k = 0; k < BLOCK_NUM; ++k) { for (int i = my_rank * thread_size; i < (my_rank + 1) * thread_size; i++) { double d, d_cubed, diff_x, diff_y; for (int j = BLOCK_SIZE * k; j < BLOCK_SIZE * (k + 1); j++) { if (i == j) { continue; } // Compute the x and y distances and total distance d between // bodies i and j diff_x = world->bodies[j].x - world->bodies[i].x; diff_y = world->bodies[j].y - world->bodies[i].y; d = sqrt((diff_x * diff_x) + (diff_y * diff_y)); if (d < 25) { d = 25; } d_cubed = d * d * d; // Add force due to j to total force on i force_x[i] += GRAV * (world->bodies[i].m * world->bodies[j].m / d_cubed) * diff_x; force_y[i] += GRAV * (world->bodies[i].m * world->bodies[j].m / d_cubed) * diff_y; } } } #pragma omp barrier for (int i = my_rank * thread_size; i < (my_rank+1) * thread_size; i++) { // Update velocities speed[i].vx += force_x[i] * time_res / world->bodies[i].m; speed[i].vy += force_y[i] * time_res / world->bodies[i].m; // Update positions world->bodies[i].x += speed[i].vx * time_res; world->bodies[i].y += speed[i].vy * time_res; } } } void step_world(struct world *world, double time_res) { struct tms ttt; clock_t start, end; start = times(&ttt); position_step(world, time_res); end = times(&ttt); total_time += end - start; collision_step(world); } /* Main method runs initialize() and update() */ int main(int argc, char **argv) { //total_time.tv_sec = 0; //total_time.tv_usec = 0; /* get num bodies from the command line */ int num_bodies, threads; num_bodies = DEF_NUM_BODIES; threads = 1; if (argc == 2) { num_bodies = atoi(argv[1]); }; int BLOCK_NUM_LIST[7] = {2,4,8,10,16,32,40}; FILE* fstream=fopen("outdata","a+"); fprintf(fstream,"Universe has %d bodies\n", num_bodies); for (int i = 0; i < 7; ++i) { BLOCK_NUM = BLOCK_NUM_LIST[i]; printf("Universe has %d bodies. 
%d Threads\n", num_bodies, threads); //omp_set_num_threads(threads); /* set up the universe */ time_t cur_time; time(&cur_time); srand48((long) cur_time); // seed the RNG used in create_world struct world *world = create_world(num_bodies); /* set up graphics using Xlib */ #if NOT_RUN_ON_PI Display *disp = XOpenDisplay(NULL); int scr = DefaultScreen(disp); Window win = XCreateSimpleWindow( disp, RootWindow(disp, scr), 0, 0, WIDTH, HEIGHT, 0, BlackPixel(disp, scr), WhitePixel(disp, scr)); XStoreName(disp, win, "N-Body Simulator"); Pixmap back_buf = XCreatePixmap(disp, RootWindow(disp, scr), WIDTH, HEIGHT, DefaultDepth(disp, scr)); GC gc = XCreateGC(disp, back_buf, 0, 0); // Make sure we're only looking for messages about closing the window Atom del_window = XInternAtom(disp, "WM_DELETE_WINDOW", 0); XSetWMProtocols(disp, win, &del_window, 1); XSelectInput(disp, win, StructureNotifyMask); XMapWindow(disp, win); XEvent event; // wait until window is mapped while (1) { XNextEvent(disp, &event); if (event.type == MapNotify) { break; } } #endif struct timespec delay = {0, 1000000000 / 60}; // for 60 FPS struct timespec remaining; double delta_t = 0.1; int ii; total_time=0; for (ii = 0; ii < iteration_times; ii++) { // check if the window has been closed #if NOT_RUN_ON_PI if (XCheckTypedEvent(disp, ClientMessage, &event)) { break; } // we first draw to the back buffer then copy it to the front (`win`) draw_world(disp, back_buf, gc, world); XCopyArea(disp, back_buf, win, gc, 0, 0, WIDTH, HEIGHT, 0, 0); #endif step_world(world, delta_t); //if you want to watch the process in 60 FPS //nanosleep(&delay, &remaining); } // printf("Total Time = %f\n", (double)total_time.tv_sec + (double)total_time.tv_usec/1000000); fprintf(fstream,"%d %lfs\n", threads,(double) total_time / (sysconf(_SC_CLK_TCK))); #if NOT_RUN_ON_PI XFreeGC(disp, gc); XFreePixmap(disp, back_buf); XDestroyWindow(disp, win); XCloseDisplay(disp); #endif } fclose(fstream); return 0; }
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GENERAL_MATRIX_MATRIX_H #define EIGEN_GENERAL_MATRIX_MATRIX_H namespace Eigen { namespace internal { template<typename _LhsScalar, typename _RhsScalar> class level3_blocking; /* Specialization for a row-major destination matrix => simple transposition of the product */ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int ResInnerStride> struct general_matrix_matrix_product<Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, RowMajor, ResInnerStride> { typedef gebp_traits <RhsScalar, LhsScalar> Traits; typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; static EIGEN_STRONG_INLINE void run( Index rows, Index cols, Index depth, const LhsScalar *lhs, Index lhsStride, const RhsScalar *rhs, Index rhsStride, ResScalar *res, Index resIncr, Index resStride, ResScalar alpha, level3_blocking<RhsScalar, LhsScalar> &blocking, GemmParallelInfo <Index> *info = 0) { // transpose the product such that the result is column major general_matrix_matrix_product<Index, RhsScalar, RhsStorageOrder == RowMajor ? ColMajor : RowMajor, ConjugateRhs, LhsScalar, LhsStorageOrder == RowMajor ? ColMajor : RowMajor, ConjugateLhs, ColMajor, ResInnerStride> ::run(cols, rows, depth, rhs, rhsStride, lhs, lhsStride, res, resIncr, resStride, alpha, blocking, info); } }; /* Specialization for a col-major destination matrix * => Blocking algorithm following Goto's paper */ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs, int ResInnerStride> struct general_matrix_matrix_product<Index, LhsScalar, LhsStorageOrder, ConjugateLhs, RhsScalar, RhsStorageOrder, ConjugateRhs, ColMajor, ResInnerStride> { typedef gebp_traits <LhsScalar, RhsScalar> Traits; typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar; static void run(Index rows, Index cols, Index depth, const LhsScalar *_lhs, Index lhsStride, const RhsScalar *_rhs, Index rhsStride, ResScalar *_res, Index resIncr, Index resStride, ResScalar alpha, level3_blocking<LhsScalar, RhsScalar> &blocking, GemmParallelInfo <Index> *info = 0) { typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper; typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper; typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor, Unaligned, ResInnerStride> ResMapper; LhsMapper lhs(_lhs, lhsStride); RhsMapper rhs(_rhs, rhsStride); ResMapper res(_res, resStride, resIncr); Index kc = blocking.kc(); // cache block size along the K direction Index mc = (std::min)(rows, blocking.mc()); // cache block size along the M direction Index nc = (std::min)(cols, blocking.nc()); // cache block size along the N direction gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs; gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs; gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp; #ifdef 
EIGEN_HAS_OPENMP if(info) { // this is the parallel version! int tid = omp_get_thread_num(); int threads = omp_get_num_threads(); LhsScalar* blockA = blocking.blockA(); eigen_internal_assert(blockA!=0); std::size_t sizeB = kc*nc; ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0); // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs... for(Index k=0; k<depth; k+=kc) { const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A' // In order to reduce the chance that a thread has to wait for the other, // let's start by packing B'. pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc); // Pack A_k to A' in a parallel fashion: // each thread packs the sub block A_k,i to A'_i where i is the thread id. // However, before copying to A'_i, we have to make sure that no other thread is still using it, // i.e., we test that info[tid].users equals 0. // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it. while(info[tid].users!=0) {} info[tid].users += threads; pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length); // Notify the other threads that the part A'_i is ready to go. info[tid].sync = k; // Computes C_i += A' * B' per A'_i for(int shift=0; shift<threads; ++shift) { int i = (tid+shift)%threads; // At this point we have to make sure that A'_i has been updated by the thread i, // we use testAndSetOrdered to mimic a volatile access. // However, no need to wait for the B' part which has been updated by the current thread! if (shift>0) { while(info[i].sync!=k) { } } gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha); } // Then keep going as usual with the remaining B' for(Index j=nc; j<cols; j+=nc) { const Index actual_nc = (std::min)(j+nc,cols)-j; // pack B_k,j to B' pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc); // C_j += A' * B' gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha); } // Release all the sub blocks A'_i of A' for the current thread, // i.e., we simply decrement the number of users by 1 for(Index i=0; i<threads; ++i) #pragma omp atomic info[i].users -= 1; } } else #endif // EIGEN_HAS_OPENMP { EIGEN_UNUSED_VARIABLE(info); // this is the sequential version! std::size_t sizeA = kc * mc; std::size_t sizeB = kc * nc; ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA()); ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB()); const bool pack_rhs_once = mc != rows && kc == depth && nc == cols; // For each horizontal panel of the rhs, and corresponding panel of the lhs... for (Index i2 = 0; i2 < rows; i2 += mc) { const Index actual_mc = (std::min)(i2 + mc, rows) - i2; for (Index k2 = 0; k2 < depth; k2 += kc) { const Index actual_kc = (std::min)(k2 + kc, depth) - k2; // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs. // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching) // Note that this panel will be read as many times as the number of blocks in the rhs's // horizontal panel which is, in practice, a very low number. pack_lhs(blockA, lhs.getSubMapper(i2, k2), actual_kc, actual_mc); // For each kc x nc block of the rhs's horizontal panel... 
for (Index j2 = 0; j2 < cols; j2 += nc) { const Index actual_nc = (std::min)(j2 + nc, cols) - j2; // We pack the rhs's block into a sequential chunk of memory (L2 caching) // Note that this block will be read a very high number of times, which is equal to the number of // micro horizontal panel of the large rhs's panel (e.g., rows/12 times). if ((!pack_rhs_once) || i2 == 0) pack_rhs(blockB, rhs.getSubMapper(k2, j2), actual_kc, actual_nc); // Everything is packed, we can now call the panel * block kernel: gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha); } } } } } }; /********************************************************************************* * Specialization of generic_product_impl for "large" GEMM, i.e., * implementation of the high level wrapper to general_matrix_matrix_product **********************************************************************************/ template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType> struct gemm_functor { gemm_functor(const Lhs &lhs, const Rhs &rhs, Dest &dest, const Scalar &actualAlpha, BlockingType &blocking) : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking) {} void initParallelSession(Index num_threads) const { m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads); m_blocking.allocateA(); } void operator()(Index row, Index rows, Index col = 0, Index cols = -1, GemmParallelInfo <Index> *info = 0) const { if (cols == -1) cols = m_rhs.cols(); Gemm::run(rows, cols, m_lhs.cols(), &m_lhs.coeffRef(row, 0), m_lhs.outerStride(), &m_rhs.coeffRef(0, col), m_rhs.outerStride(), (Scalar *) &(m_dest.coeffRef(row, col)), m_dest.innerStride(), m_dest.outerStride(), m_actualAlpha, m_blocking, info); } typedef typename Gemm::Traits Traits; protected: const Lhs &m_lhs; const Rhs &m_rhs; Dest &m_dest; Scalar m_actualAlpha; BlockingType &m_blocking; }; template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor = 1, bool FiniteAtCompileTime = MaxRows != Dynamic && MaxCols != Dynamic && MaxDepth != Dynamic> class gemm_blocking_space; template<typename _LhsScalar, typename _RhsScalar> class level3_blocking { typedef _LhsScalar LhsScalar; typedef _RhsScalar RhsScalar; protected: LhsScalar *m_blockA; RhsScalar *m_blockB; Index m_mc; Index m_nc; Index m_kc; public: level3_blocking() : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0) {} inline Index mc() const { return m_mc; } inline Index nc() const { return m_nc; } inline Index kc() const { return m_kc; } inline LhsScalar *blockA() { return m_blockA; } inline RhsScalar *blockB() { return m_blockB; } }; template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor> class gemm_blocking_space<StorageOrder, _LhsScalar, _RhsScalar, MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */> : public level3_blocking< typename conditional<StorageOrder == RowMajor, _RhsScalar, _LhsScalar>::type, typename conditional<StorageOrder == RowMajor, _LhsScalar, _RhsScalar>::type> { enum { Transpose = StorageOrder == RowMajor, ActualRows = Transpose ? MaxCols : MaxRows, ActualCols = Transpose ? 
MaxRows : MaxCols }; typedef typename conditional<Transpose, _RhsScalar, _LhsScalar>::type LhsScalar; typedef typename conditional<Transpose, _LhsScalar, _RhsScalar>::type RhsScalar; typedef gebp_traits <LhsScalar, RhsScalar> Traits; enum { SizeA = ActualRows * MaxDepth, SizeB = ActualCols * MaxDepth }; #if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA]; EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB]; #else EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1]; EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1]; #endif public: gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/) { this->m_mc = ActualRows; this->m_nc = ActualCols; this->m_kc = MaxDepth; #if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES this->m_blockA = m_staticA; this->m_blockB = m_staticB; #else this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)); this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1)); #endif } void initParallel(Index, Index, Index, Index) {} inline void allocateA() {} inline void allocateB() {} inline void allocateAll() {} }; template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor> class gemm_blocking_space<StorageOrder, _LhsScalar, _RhsScalar, MaxRows, MaxCols, MaxDepth, KcFactor, false> : public level3_blocking< typename conditional<StorageOrder == RowMajor, _RhsScalar, _LhsScalar>::type, typename conditional<StorageOrder == RowMajor, _LhsScalar, _RhsScalar>::type> { enum { Transpose = StorageOrder == RowMajor }; typedef typename conditional<Transpose, _RhsScalar, _LhsScalar>::type LhsScalar; typedef typename conditional<Transpose, _LhsScalar, _RhsScalar>::type RhsScalar; typedef gebp_traits <LhsScalar, RhsScalar> Traits; Index m_sizeA; Index m_sizeB; public: gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking) { this->m_mc = Transpose ? cols : rows; this->m_nc = Transpose ? rows : cols; this->m_kc = depth; if (l3_blocking) { computeProductBlockingSizes<LhsScalar, RhsScalar, KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads); } else // no l3 blocking { Index n = this->m_nc; computeProductBlockingSizes<LhsScalar, RhsScalar, KcFactor>(this->m_kc, this->m_mc, n, num_threads); } m_sizeA = this->m_mc * this->m_kc; m_sizeB = this->m_kc * this->m_nc; } void initParallel(Index rows, Index cols, Index depth, Index num_threads) { this->m_mc = Transpose ? cols : rows; this->m_nc = Transpose ? 
rows : cols; this->m_kc = depth; eigen_internal_assert(this->m_blockA == 0 && this->m_blockB == 0); Index m = this->m_mc; computeProductBlockingSizes<LhsScalar, RhsScalar, KcFactor>(this->m_kc, m, this->m_nc, num_threads); m_sizeA = this->m_mc * this->m_kc; m_sizeB = this->m_kc * this->m_nc; } void allocateA() { if (this->m_blockA == 0) this->m_blockA = aligned_new<LhsScalar>(m_sizeA); } void allocateB() { if (this->m_blockB == 0) this->m_blockB = aligned_new<RhsScalar>(m_sizeB); } void allocateAll() { allocateA(); allocateB(); } ~gemm_blocking_space() { aligned_delete(this->m_blockA, m_sizeA); aligned_delete(this->m_blockB, m_sizeB); } }; } // end namespace internal namespace internal { template<typename Lhs, typename Rhs> struct generic_product_impl<Lhs, Rhs, DenseShape, DenseShape, GemmProduct> : generic_product_impl_base<Lhs, Rhs, generic_product_impl<Lhs, Rhs, DenseShape, DenseShape, GemmProduct> > { typedef typename Product<Lhs, Rhs>::Scalar Scalar; typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; typedef internal::blas_traits<Lhs> LhsBlasTraits; typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType; typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned; typedef internal::blas_traits<Rhs> RhsBlasTraits; typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType; typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned; enum { MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime, Rhs::MaxRowsAtCompileTime) }; typedef generic_product_impl<Lhs, Rhs, DenseShape, DenseShape, CoeffBasedProductMode> lazyproduct; template<typename Dst> static void evalTo(Dst &dst, const Lhs &lhs, const Rhs &rhs) { if ((rhs.rows() + dst.rows() + dst.cols()) < 20 && rhs.rows() > 0) lazyproduct::eval_dynamic(dst, lhs, rhs, internal::assign_op<typename Dst::Scalar, Scalar>()); else { dst.setZero(); scaleAndAddTo(dst, lhs, rhs, Scalar(1)); } } template<typename Dst> static void addTo(Dst &dst, const Lhs &lhs, const Rhs &rhs) { if ((rhs.rows() + dst.rows() + dst.cols()) < 20 && rhs.rows() > 0) lazyproduct::eval_dynamic(dst, lhs, rhs, internal::add_assign_op<typename Dst::Scalar, Scalar>()); else scaleAndAddTo(dst, lhs, rhs, Scalar(1)); } template<typename Dst> static void subTo(Dst &dst, const Lhs &lhs, const Rhs &rhs) { if ((rhs.rows() + dst.rows() + dst.cols()) < 20 && rhs.rows() > 0) lazyproduct::eval_dynamic(dst, lhs, rhs, internal::sub_assign_op<typename Dst::Scalar, Scalar>()); else scaleAndAddTo(dst, lhs, rhs, Scalar(-1)); } template<typename Dest> static void scaleAndAddTo(Dest &dst, const Lhs &a_lhs, const Rhs &a_rhs, const Scalar &alpha) { eigen_assert(dst.rows() == a_lhs.rows() && dst.cols() == a_rhs.cols()); if (a_lhs.cols() == 0 || a_lhs.rows() == 0 || a_rhs.cols() == 0) return; typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs); typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs); Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs) * RhsBlasTraits::extractScalarFactor(a_rhs); typedef internal::gemm_blocking_space<(Dest::Flags & RowMajorBit) ? RowMajor : ColMajor, LhsScalar, RhsScalar, Dest::MaxRowsAtCompileTime, Dest::MaxColsAtCompileTime, MaxDepthAtCompileTime> BlockingType; typedef internal::gemm_functor< Scalar, Index, internal::general_matrix_matrix_product< Index, LhsScalar, (ActualLhsTypeCleaned::Flags & RowMajorBit) ? 
RowMajor : ColMajor, bool( LhsBlasTraits::NeedToConjugate), RhsScalar, (ActualRhsTypeCleaned::Flags & RowMajorBit) ? RowMajor : ColMajor, bool( RhsBlasTraits::NeedToConjugate), (Dest::Flags & RowMajorBit) ? RowMajor : ColMajor, Dest::InnerStrideAtCompileTime>, ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor; BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true); internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime > 32 || Dest::MaxRowsAtCompileTime == Dynamic)> (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags & RowMajorBit); } }; } // end namespace internal } // end namespace Eigen #endif // EIGEN_GENERAL_MATRIX_MATRIX_H
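The blocking and dispatch machinery above is not meant to be called directly; it is reached through Eigen's public expression API. Below is a minimal sketch of a caller that ends up in generic_product_impl<...,GemmProduct>::scaleAndAddTo; the matrix sizes are illustrative assumptions, chosen large enough that evalTo() does not take the lazy coefficient-based path.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  // Dynamic-size dense operands select the GemmProduct specialization.
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(256, 128);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(128, 64);

  // operator* builds a Product expression; assigning it evaluates through
  // scaleAndAddTo(), which sets up a gemm_blocking_space and runs the packed
  // GEBP kernels (in parallel when Eigen is built with OpenMP enabled).
  Eigen::MatrixXd C = A * B;

  // Accumulation reuses the same path via addTo().
  C += A * B;

  std::cout << C.norm() << std::endl;
  return 0;
}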
mm.c
#include <stdio.h>
#include <stdlib.h>

/* Square matrix multiply c = a * b, all matrices width x width in row-major
   layout.  The outer i/j loops are collapsed into one parallel loop; the
   inner "parallel for reduction" is nested inside that region, so it has no
   effect unless nested parallelism is enabled and is normally redundant
   given the outer collapse(2) loop. */
void mm(double* a, double* b, double* c, int width)
{
    #pragma omp parallel for collapse(2)
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            double sum = 0;
            #pragma omp parallel for reduction(+:sum)
            for (int k = 0; k < width; k++) {
                double x = a[i * width + k];
                double y = b[k * width + j];
                sum += x * y;
            }
            c[i * width + j] = sum;
        }
    }
}

int main()
{
    int width = 500;
    double *a = (double*) malloc (width * width * sizeof(double));
    double *b = (double*) malloc (width * width * sizeof(double));
    double *c = (double*) malloc (width * width * sizeof(double));

    /* a[i][j] = i, b[i][j] = j, c cleared to zero */
    #pragma omp parallel for collapse(2)
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            a[i*width+j] = i;
            b[i*width+j] = j;
            c[i*width+j] = 0;
        }
    }

    mm(a, b, c, width);

    //for(int i = 0; i < width; i++) {
    //    for(int j = 0; j < width; j++) {
    //        printf("\n c[%d][%d] = %f",i,j,c[i*width+j]);
    //    }
    //}

    free(a);
    free(b);
    free(c);
    return 0;
}
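With the initialization used in main() (a[i][j] = i, b[i][j] = j), the product has a closed form, c[i][j] = sum_k i*j = width*i*j, so the kernel can be checked without a reference multiplication. A small sketch of such a check follows; the function name and tolerance are illustrative and not part of mm.c.

#include <cmath>

// Returns true if c matches the analytic result width * i * j for the
// specific inputs built in main() above.
bool check_mm(const double* c, int width, double tol = 1e-9)
{
    for (int i = 0; i < width; i++)
        for (int j = 0; j < width; j++) {
            double expected = (double)width * i * j;
            if (std::fabs(c[i * width + j] - expected) > tol * (expected + 1.0))
                return false;
        }
    return true;
}

Calling check_mm(c, width) right after mm(a, b, c, width) gives a quick pass/fail signal when experimenting with the pragmas.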
timing.c
/** * * @file timing.c * * PLASMA auxiliary routines * PLASMA is a software package provided by Univ. of Tennessee, * Univ. of California Berkeley and Univ. of Colorado Denver * * @version 2.6.0 * @author Mathieu Faverge * @author Dulceneia Becker * @date 2010-11-15 * **/ #define _FMULS FMULS_GEQRF( M, N ) #define _FADDS FADDS_GEQRF( M, N ) #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef PLASMA_EZTRACE #include <eztrace.h> #endif #include <unistd.h> #include <sys/time.h> #include <sys/resource.h> #ifdef USE_MKL #include <mkl_cblas.h> #include <mkl_lapacke.h> #else #include <cblas.h> #include <lapacke.h> #endif #include "plasma.h" #include "core_blas.h" #include "flops.h" #include "timing.h" #include "auxiliary.h" #include "main.h" #include "workspace.h" #include "Utils.h" #define EPSILON 1.0E-9 static double RunTest(double *t_, struct user_parameters*); static double RunTest_dpotrf(real_Double_t *t_, struct user_parameters* params) { double t; int64_t N = params->matrix_size; int64_t NB = params->blocksize; int check = params->check; int uplo = PlasmaUpper; double check_res = 0; /* Allocate Data */ PLASMA_desc *descA = NULL; double* ptr = malloc(N * N * sizeof(double)); PLASMA_Desc_Create(&descA, ptr, PlasmaRealDouble, NB, NB, NB*NB, N, N, 0, 0, N, N); #pragma omp parallel #pragma omp master plasma_pdplgsy_quark( (double)N, *descA, 51 ); /* Save A for check */ double *A = NULL; if(check) { A = (double*)malloc(N * N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descA, (void*)A, N); } double t_start, t_end; //START_TIMING(); t_start = rtclock(); #pragma omp parallel #pragma omp master plasma_pdpotrf_quark(uplo, *descA); //STOP_TIMING(); t_end = rtclock(); *t_ = t_end - t_start; /* Check the solution */ if ( check ) { PLASMA_desc *descB = NULL; double* ptr = (double*)malloc(N * sizeof(double)); PLASMA_Desc_Create(&descB, ptr, PlasmaRealDouble, NB, NB, NB*NB, N, 1, 0, 0, N, 1); plasma_pdpltmg_seq(* descB, 7672 ); double* B = (double*)malloc(N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descB, (void*)B, N); PLASMA_dpotrs_Tile( uplo, descA, descB ); double* X = (double*)malloc(N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descB, (void*)X, N); check_res = d_check_solution(N, N, 1, A, N, B, X, N); PASTE_CODE_FREE_MATRIX( descB ); free( A ); free( B ); free( X ); } PASTE_CODE_FREE_MATRIX( descA ); return check_res; } static double RunTest_dgetrf(real_Double_t *t_, struct user_parameters* params) { double t; int64_t N = params->matrix_size; int64_t NB = params->blocksize; int check = params->check; double check_res = 0; /* Allocate Data */ PLASMA_desc *descA = NULL; double* ptr = malloc(N * N * sizeof(double)); PLASMA_Desc_Create(&descA, ptr, PlasmaRealDouble, NB, NB, NB*NB, N, N, 0, 0, N, N); int* piv = (int*)malloc(N * sizeof(double)); #pragma omp parallel #pragma omp master plasma_pdpltmg_quark(*descA, 3456); /* Save AT in lapack layout for check */ double *A = NULL; if(check) { A = (double*)malloc(N * N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descA, (void*)A, N); } double t_start, t_end; //START_TIMING(); t_start = rtclock(); #pragma omp parallel #pragma omp master plasma_pdgetrf_rectil_quark(*descA, piv); //STOP_TIMING(); t_end = rtclock(); *t_ = t_end - t_start; /* Check the solution */ if ( check ) { PLASMA_desc *descB = NULL; double* ptr = (double*)malloc(N * sizeof(double)); PLASMA_Desc_Create(&descB, ptr, PlasmaRealDouble, NB, NB, NB*NB, N, 1, 0, 0, N, 1); plasma_pdpltmg_seq(*descB, 7732 ); 
double* b = (double*)malloc(N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descB, (void*)b, N); PLASMA_dgetrs_Tile( PlasmaNoTrans, descA, piv, descB ); double* x = (double*)malloc(N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descB, (void*)x, N); check_res = d_check_solution(N, N, 1, A, N, b, x, N); PASTE_CODE_FREE_MATRIX( descB ); free(A); free(b); free(x); } PASTE_CODE_FREE_MATRIX( descA ); free( piv ); return check_res; } static double RunTest_dgeqrf(real_Double_t *t_, struct user_parameters* params) { double t; PLASMA_desc *descT; int64_t N = params->matrix_size; int64_t IB = params->iblocksize; int64_t NB = params->blocksize; int check = params->check; double check_res = 0; /* Allocate Data */ PLASMA_desc *descA = NULL; double *ptr = (double*)malloc(N * N * sizeof(double)); PLASMA_Desc_Create(&descA, ptr, PlasmaRealDouble, NB, NB, NB*NB, N, N, 0, 0, N, N); #pragma omp parallel #pragma omp master plasma_pdpltmg_quark(*descA, 5373 ); /* Save A for check */ double *A = NULL; if ( check ) { A = (double*)malloc(N * N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descA, (void*)A, N); } /* Allocate Workspace */ plasma_alloc_ibnb_tile(N, N, PlasmaRealDouble, &descT, IB, NB); double t_start, t_end; //START_TIMING(); t_start = rtclock(); #pragma omp parallel #pragma omp master plasma_pdgeqrf_quark( *descA, *descT , IB); //STOP_TIMING(); t_end = rtclock(); *t_ = t_end - t_start; /* Check the solution */ if ( check ) { /* Allocate B for check */ PLASMA_desc *descB = NULL; double* ptr = (double*)malloc(N * sizeof(double)); PLASMA_Desc_Create(&descB, ptr, PlasmaRealDouble, NB, NB, NB*NB, N, 1, 0, 0, N, 1); /* Initialize and save B */ plasma_pdpltmg_seq(*descB, 2264 ); double *B = (double*)malloc(N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descB, (void*)B, N); /* Compute the solution */ PLASMA_dgeqrs_Tile( descA, descT, descB , IB); /* Copy solution to X */ double *X = (double*)malloc(N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descB, (void*)X, N); check_res = d_check_solution(N, N, 1, A, N, B, X, N); /* Free checking structures */ PASTE_CODE_FREE_MATRIX( descB ); free( A ); free( B ); free( X ); } /* Free data */ PLASMA_Dealloc_Handle_Tile(&descT); PASTE_CODE_FREE_MATRIX( descA ); return check_res; } int ISEED[4] = {0,0,0,1}; /* initial seed for zlarnv() */ double run(struct user_parameters* params) { double t; double fmuls, fadds; double flops; params->succeed = 1; int type = params->type; if (params->matrix_size <= 0) { params->matrix_size = 2048; } if (params->blocksize <= 0) { params->blocksize = 128; } //ifdef if (params->iblocksize <= 0) { params->iblocksize = params->blocksize; } if (params->type <= 0) { params->type = 1; type = params->type; } int64_t N = params->matrix_size; int64_t M = params->matrix_size; fadds = (double)(_FADDS); fmuls = (double)(_FMULS); flops = 1e-9 * (fmuls + fadds); //mod aqui if (type == 1) { if (RunTest_dgeqrf(&t, params) > EPSILON && params->check) params->succeed = 0; } else if (type == 2) { if (RunTest_dgetrf(&t, params) > EPSILON && params->check) params->succeed = 0; } else if (type == 3) { if (RunTest_dpotrf(&t, params) > EPSILON && params->check) params->succeed = 0; } // return gflops return flops / t; }
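RunTest_dpotrf, RunTest_dgetrf and RunTest_dgeqrf all time the factorization by sampling rtclock() around the #pragma omp parallel / #pragma omp master region; rtclock() itself comes from elsewhere in this build (Utils.h). A self-contained helper in the same spirit is sketched below; the name wall_seconds and the fallback path are assumptions, not the project's actual implementation.

#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#endif

// Wall-clock time in seconds: omp_get_wtime() when OpenMP is available,
// gettimeofday() otherwise.  Illustrative stand-in for rtclock().
static double wall_seconds()
{
#ifdef _OPENMP
    return omp_get_wtime();
#else
    struct timeval tv;
    gettimeofday(&tv, 0);
    return (double)tv.tv_sec + 1e-6 * (double)tv.tv_usec;
#endif
}

It would be used exactly like the t_start/t_end pairs above: sample before and after the master region and report the difference, which run() then divides into the flop count (scaled by 1e-9) to obtain Gflop/s.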
lastprivate-conditional-3.c
void
foo (int *p)
{
  int i, j, k;
  #pragma omp parallel
  {
    #pragma omp for lastprivate (conditional: i)  /* { dg-warning "conditional 'lastprivate' on loop iterator 'i' ignored" } */
    for (i = 0; i < 32; i++)
      ;
    #pragma omp for collapse (3) lastprivate (conditional: i)  /* { dg-warning "conditional 'lastprivate' on loop iterator 'i' ignored" } */
    for (i = 0; i < 32; i++)
      for (j = 0; j < 32; ++j)
        for (k = 0; k < 2; ++k)
          ;
    #pragma omp for collapse (3) lastprivate (conditional: j)  /* { dg-warning "conditional 'lastprivate' on loop iterator 'j' ignored" } */
    for (i = 0; i < 32; i++)
      for (j = 0; j < 32; ++j)
        for (k = 0; k < 2; ++k)
          ;
    #pragma omp for collapse (3) lastprivate (conditional: k)  /* { dg-warning "conditional 'lastprivate' on loop iterator 'k' ignored" } */
    for (i = 0; i < 32; i++)
      for (j = 0; j < 32; ++j)
        for (k = 0; k < 2; ++k)
          ;
  }
  #pragma omp parallel for lastprivate (conditional: i)  /* { dg-warning "conditional 'lastprivate' on loop iterator 'i' ignored" } */
  for (i = 0; i < 32; i++)
    ;
  #pragma omp parallel for collapse (3) lastprivate (conditional: i)  /* { dg-warning "conditional 'lastprivate' on loop iterator 'i' ignored" } */
  for (i = 0; i < 32; i++)
    for (j = 0; j < 32; ++j)
      for (k = 0; k < 2; ++k)
        ;
  #pragma omp parallel for collapse (3) lastprivate (conditional: j)  /* { dg-warning "conditional 'lastprivate' on loop iterator 'j' ignored" } */
  for (i = 0; i < 32; i++)
    for (j = 0; j < 32; ++j)
      for (k = 0; k < 2; ++k)
        ;
  #pragma omp parallel for collapse (3) lastprivate (conditional: k)  /* { dg-warning "conditional 'lastprivate' on loop iterator 'k' ignored" } */
  for (i = 0; i < 32; i++)
    for (j = 0; j < 32; ++j)
      for (k = 0; k < 2; ++k)
        ;
}
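The warnings exercised above flag the conditional modifier being applied to the loop iterators themselves, where it has no effect. The modifier is intended for a variable that is only assigned on some iterations; the following is a minimal sketch of that intended use (hypothetical function, not part of the test suite).

// last_hit receives the value from the sequentially last iteration in which
// the assignment actually ran, not simply from the last iteration overall.
int find_last_match(const int* v, int n, int key)
{
    int last_hit = -1;
    #pragma omp parallel for lastprivate(conditional: last_hit)
    for (int i = 0; i < n; i++)
        if (v[i] == key)
            last_hit = i;
    return last_hit;
}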
simd4.c
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */
/* { dg-additional-options "-std=c99" { target c } } */

struct S *p;  /* { dg-message "forward declaration" "" { target c++ } } */
float f;
int j;

void
foo (void)
{
  #pragma omp simd linear(p) linear(f : 1)
  for (int i = 0; i < 10; i++)
    ;
  #pragma omp simd linear(j : 7.0)  /* { dg-error "step expression must be integral" } */
  for (int i = 0; i < 10; i++)
    ;
}
/* { dg-error "linear clause applied to" "" { target *-*-* } 12 } */
/* { dg-error "(incomplete|undefined) type" "" { target *-*-* } 12 } */
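The two rejected clauses above are a linear list item of incomplete pointer type and a non-integral step expression. For contrast, a sketch of valid usage under OpenMP 4.0+ (hypothetical function name): a pointer to a complete type may be linear, with a step counted in elements, and the step for an integer list item must itself be an integer expression.

// p advances by one float per logical iteration, matching linear(p : 1).
void scale_in_place(float* p, int n, float a)
{
    #pragma omp simd linear(p : 1)
    for (int i = 0; i < n; i++) {
        *p *= a;
        p++;
    }
}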
residualbased_elimination_quasiincompresible_builder_and_solver.h
/* ********************************************************* * * Last Modified by: $Author: anonymous $ * Date: $Date: 2009-01-15 14:50:24 $ * Revision: $Revision: 1.12 $ * * ***********************************************************/ #if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_QUASI_INCOMPRESSIBLE_BUILDER_AND_SOLVER ) #define KRATOS_RESIDUAL_BASED_ELIMINATION_QUASI_INCOMPRESSIBLE_BUILDER_AND_SOLVER /* System includes */ #include <set> #ifdef _OPENMP #include <omp.h> #endif /* External includes */ // #include "boost/smart_ptr.hpp" #include <pybind11/pybind11.h> #include "includes/define.h" #include "includes/define_python.h" /* Project includes */ #include "includes/define.h" #include "ULF_application.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "utilities/geometry_utilities.h" #include "boost/smart_ptr.hpp" #include "utilities/timer.h" namespace Kratos { /**@name Kratos Globals */ /*@{ */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ /**@name Enum's */ /*@{ */ /*@} */ /**@name Functions */ /*@{ */ /*@} */ /**@name Kratos Classes */ /*@{ */ /** Short class definition. Detail class definition. \URL[Example of use html]{ extended_documentation/no_ex_of_use.html} \URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf} \URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc} \URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps} \URL[Extended documentation html]{ extended_documentation/no_ext_doc.html} \URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf} \URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc} \URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps} */ template < class TSparseSpace, class TDenseSpace , class TLinearSolver, int TDim > class ResidualBasedEliminationQuasiIncompressibleBuilderAndSolver : public BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver > { public: /**@name Type Definitions */ /*@{ */ KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedEliminationQuasiIncompressibleBuilderAndSolver ); typedef BuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef typename BaseType::NodesArrayType NodesContainerType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; typedef typename BaseType::ElementsContainerType ElementsContainerType; /*@} */ /**@name Life Cycle */ /*@{ */ /** Constructor. */ ResidualBasedEliminationQuasiIncompressibleBuilderAndSolver( typename TLinearSolver::Pointer pNewLinearSystemSolver) : BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >(pNewLinearSystemSolver) { } /** Destructor. 
*/ ~ResidualBasedEliminationQuasiIncompressibleBuilderAndSolver() override {} /*@} */ /**@name Operators */ /*@{ */ //************************************************************************** //************************************************************************** void BuildAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { KRATOS_TRY KRATOS_THROW_ERROR(std::runtime_error, "For the quasi incompressible builder and solver this fct doesnt exist!", ""); KRATOS_CATCH("") } void InitializeSolutionStep( ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { KRATOS_TRY //KRATOS_WATCH("Initialize Solution Step::: EMPTY FUNCTION FOR THIS SOLVER") KRATOS_CATCH("") } void FinalizeSolutionStep( ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { KRATOS_TRY //KRATOS_WATCH("Finalize Solution Step:::EMPTY FUNCTION FOR THIS SOLVER") KRATOS_CATCH("") } //************************************************************************** //************************************************************************** //this is done in a purely nodal way taking advantage of the neighbour relatinoships //which are assumed to be calculated separately //HERE we store the displacements variables in a list void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part ) { KRATOS_TRY //KRATOS_WATCH("ENTERED SETUP DOFSET OF BUILDER AND SOLVER OF ULF") mActiveNodes.clear(); mActiveNodes.reserve(r_model_part.Nodes().size() ); for (typename NodesArrayType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it) { if( (it->GetValue(NEIGHBOUR_NODES)).size() != 0 ) { mActiveNodes.push_back(*(it.base() )); } } //getting the dof position //unsigned int dof_position = (mActiveNodes.begin())->GetDofPosition(PRESSURE); //fills the DofList and give a unique progressive tag to each node BaseType::mDofSet.clear(); BaseType::mDofSet.reserve(mActiveNodes.size()*TDim ); for(WeakPointerVector< Node<3> >::iterator iii = mActiveNodes.begin(); iii!=mActiveNodes.end(); iii++) { BaseType::mDofSet.push_back( iii->pGetDof(DISPLACEMENT_X).get()); BaseType::mDofSet.push_back( iii->pGetDof(DISPLACEMENT_Y).get()); //BaseType::mDofSet.push_back( iii->pGetDof(DISPLACEMENT_Y)); if (TDim==3) BaseType::mDofSet.push_back( iii->pGetDof(DISPLACEMENT_Z).get()); } this->mEquationSystemSize = BaseType::mDofSet.size(); if (BaseType::mDofSet.size()==0) KRATOS_THROW_ERROR(std::logic_error, "No degrees of freedom!", ""); BaseType::mDofSetIsInitialized = true; //KRATOS_WATCH("FINISHED SETUP DOFSET OF BUILDER AND SOLVER OF ULF") //BELOW IS THE OLD VERSION /* //count dofs mnumber_of_active_nodes = 0; for (typename NodesArrayType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it) { if( (it->GetValue(NEIGHBOUR_NODES)).size() != 0 ) { mnumber_of_active_nodes += 1; } } //getting the dof position //unsigned int dof_position = r_model_part.NodesBegin()->GetDofPosition(PRESSURE); //fills the DofList BaseType::mDofSet.clear(); BaseType::mDofSet.reserve( mnumber_of_active_nodes * TDim ); int FractionalStepNumber = r_model_part.GetProcessInfo()[FRACTIONAL_STEP]; KRATOS_WATCH(FractionalStepNumber); if(TDim == 2) { for (typename NodesArrayType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it) { if( (it->GetValue(NEIGHBOUR_NODES)).size() != 0 ) { BaseType::mDofSet.push_back( it->pGetDof(DISPLACEMENT_X) ); 
BaseType::mDofSet.push_back( it->pGetDof(DISPLACEMENT_Y) ); } } } else if(TDim == 3) { for (typename NodesArrayType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it) { if( (it->GetValue(NEIGHBOUR_NODES)).size() != 0 ) { BaseType::mDofSet.push_back( it->pGetDof(DISPLACEMENT_X) ); BaseType::mDofSet.push_back( it->pGetDof(DISPLACEMENT_Y) ); BaseType::mDofSet.push_back( it->pGetDof(DISPLACEMENT_Z) ); } } } //before it was like that: //this->mEquationSystemSize = rDofSet.size(); this->mEquationSystemSize = BaseType::mDofSet.size(); //throws an execption if there are no Degrees of freedom involved in the analysis if (BaseType::mDofSet.size()==0) KRATOS_THROW_ERROR(std::logic_error, "No degrees of freedom!", ""); BaseType::mDofSetIsInitialized = true; */ KRATOS_CATCH("") } //************************************************************************** //************************************************************************** //this function numbers the DOFS - from 1 onwards... (note, that only DISPLACEMENT DOFs are stored, PRESSURE is not!!!!) void SetUpSystem( ModelPart& r_model_part ) { KRATOS_TRY //assing id to the nodes unsigned int index = 0; for(typename DofsArrayType::iterator i_dof = BaseType::mDofSet.begin() ; i_dof != BaseType::mDofSet.end() ; ++i_dof) { //i_dof->EquationId() = index; i_dof->SetEquationId(index) ; index++; } KRATOS_CATCH(""); } //************************************************************************** //************************************************************************** // void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme, TSystemMatrixType& A, TSystemMatrixType& mD, TSystemVectorType& Dx, TSystemVectorType& b, TSystemMatrixType& mMconsistent, TSystemVectorType& mMdiagInv, ModelPart& rModelPart ) { KRATOS_TRY //resizing the system vectors and matrix if (A.size1() == 0 || this->GetReshapeMatrixFlag() == true) //if the matrix is not initialized { A.resize(this->mEquationSystemSize,this->mEquationSystemSize,false); //ConstructMatrixStructure(A); } if(Dx.size() != this->mEquationSystemSize) Dx.resize(this->mEquationSystemSize,false); if(b.size() != this->mEquationSystemSize) b.resize(this->mEquationSystemSize,false); if(BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0) ); BaseType::mpReactionsVector.swap(pNewReactionsVector); } //resize auxiliaries unsigned int reduced_dim = this->mEquationSystemSize / TDim; if(mD.size1() != reduced_dim) mD.resize(reduced_dim,this->mEquationSystemSize,false); if(mMconsistent.size1() != reduced_dim) mMconsistent.resize(reduced_dim,reduced_dim,false); if(mMdiagInv.size() != reduced_dim ) mMdiagInv.resize(reduced_dim,false); KRATOS_CATCH("") } void Build( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& b) { KRATOS_TRY if(!pScheme) KRATOS_THROW_ERROR(std::runtime_error, "No scheme provided!", ""); //getting the elements from the model ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); //resetting to zero the vector of reactions TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) ); #ifndef _OPENMP //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector 
containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); // assemble all elements for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it) { //calculate elemental contribution pScheme->CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS(A,LHS_Contribution,EquationId); AssembleRHS(b,RHS_Contribution,EquationId); // clean local elemental memory pScheme->CleanMemory(*it); } LHS_Contribution.resize(0,0,false); RHS_Contribution.resize(0,false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleLHS(A,LHS_Contribution,EquationId); AssembleRHS(b,RHS_Contribution,EquationId); } #else //creating an array of lock variables of the size of the system matrix std::vector< omp_lock_t > lock_array(A.size1()); int A_size = A.size1(); for (int i = 0; i < A_size; i++) omp_init_lock(&lock_array[i]); //create a partition of the element array int number_of_threads = omp_get_max_threads(); vector<unsigned int> element_partition; CreatePartition(number_of_threads, pElements.size(), element_partition); //KRATOS_WATCH(number_of_threads); //KRATOS_WATCH(element_partition); double start_prod = omp_get_wtime(); #pragma omp parallel for for (int k = 0; k < number_of_threads; k++) { //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k]; typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k + 1]; // assemble all elements for (typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it) { //calculate elemental contribution pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, lock_array); /* double aaaa = TSparseSpace::TwoNorm(b); if (TSparseSpace::TwoNorm(b) == aaaa + 1000000000000000000.0) { KRATOS_WATCH((*it)->Id()) KRATOS_THROW_ERROR(std::logic_error, "Something is wrong: fluid element cannot have all 3 nodes at the FSI boundary " , ""); } */ // clean local elemental memory pScheme->CleanMemory(*it); // #pragma omp critical // { // //assemble the elemental contribution // AssembleLHS(A,LHS_Contribution,EquationId); // AssembleRHS(b,RHS_Contribution,EquationId); // // // clean local elemental memory // pScheme->CleanMemory(*it); // } } } //KRATOS_WATCH("Finished assembling of builder and solver") vector<unsigned int> condition_partition; CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition); #pragma omp parallel for for (int k = 0; k < number_of_threads; k++) { //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 
0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); Condition::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ConditionsArrayType::ptr_iterator it_begin = ConditionsArray.ptr_begin() + condition_partition[k]; typename ConditionsArrayType::ptr_iterator it_end = ConditionsArray.ptr_begin() + condition_partition[k + 1]; // assemble all elements for (typename ConditionsArrayType::ptr_iterator it = it_begin; it != it_end; ++it) { //calculate elemental contribution pScheme->Condition_CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, lock_array); // #pragma omp critical // { // //assemble the elemental contribution // AssembleLHS(A,LHS_Contribution,EquationId); // AssembleRHS(b,RHS_Contribution,EquationId); // } } } double stop_prod = omp_get_wtime(); std::cout << "time: " << stop_prod - start_prod << std::endl; for (int i = 0; i < A_size; i++) omp_destroy_lock(&lock_array[i]); //KRATOS_WATCH("finished parallel building"); // //ensure that all the threads are syncronized here // #pragma omp barrier #endif KRATOS_CATCH("") } /*@} */ /**@name Operations */ /*@{ */ /*@} */ /**@name Access */ /*@{ */ /*@} */ /**@name Inquiry */ /*@{ */ /*@} */ /**@name Friends */ /*@{ */ /*@} */ protected: /**@name Protected static Member Variables */ /*@{ */ /*@} */ /**@name Protected member Variables */ /*@{ */ /*@} */ /**@name Protected Operators*/ /*@{ */ /*@} */ /**@name Protected Operations*/ /*@{ */ /*@} */ /**@name Protected Access */ /*@{ */ /*@} */ /**@name Protected Inquiry */ /*@{ */ /*@} */ /**@name Protected LifeCycle */ /*@{ */ /*@} */ public: /**@name Static Member Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ TSystemMatrixType mD; TSystemMatrixType mMconsistent; TSystemVectorType mMdiagInv; TSystemVectorType mpreconditioner; unsigned int mnumber_of_active_nodes; WeakPointerVector<Node<3> > mActiveNodes; //private: /**@name Static Member Variables */ /*@{ */ /*@} */ /**@name Member Variables */ /*@{ */ // WeakPointerVector<Node<3> > mActiveNodes; /*@} */ /**@name Private Operations*/ /*@{ */ //************************************************************************** inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, vector<unsigned int>& partitions) { partitions.resize(number_of_threads + 1); int partition_size = number_of_rows / number_of_threads; partitions[0] = 0; partitions[number_of_threads] = number_of_rows; for (unsigned int i = 1; i < number_of_threads; i++) partitions[i] = partitions[i - 1] + partition_size; } #ifdef _OPENMP void Assemble( TSystemMatrixType& A, TSystemVectorType& b, const LocalSystemMatrixType& LHS_Contribution, const LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, std::vector< omp_lock_t >& lock_array ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { omp_set_lock(&lock_array[i_global]); b[i_global] += RHS_Contribution(i_local); for (unsigned int j_local = 0; j_local < local_size; j_local++) { unsigned int j_global = EquationId[j_local]; if (j_global < BaseType::mEquationSystemSize) { A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } omp_unset_lock(&lock_array[i_global]); } //note 
that computation of reactions is not performed here! } } void AssembleRHS_parallel( TSystemVectorType& b, const LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId, std::vector< omp_lock_t >& lock_array ) { unsigned int local_size = RHS_Contribution.size(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { omp_set_lock(&lock_array[i_global]); b[i_global] += RHS_Contribution(i_local); omp_unset_lock(&lock_array[i_global]); } //note that computation of reactions is not performed here! } } #endif //************************************************************************** void AssembleLHS( TSystemMatrixType& A, LocalSystemMatrixType& LHS_Contribution, Element::EquationIdVectorType& EquationId ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local=0; i_local<local_size; i_local++) { unsigned int i_global=EquationId[i_local]; if ( i_global < BaseType::mEquationSystemSize ) { for (unsigned int j_local=0; j_local<local_size; j_local++) { unsigned int j_global=EquationId[j_local]; if ( j_global < BaseType::mEquationSystemSize ) A(i_global,j_global) += LHS_Contribution(i_local,j_local); } } } } //************************************************************************** void AssembleRHS( TSystemVectorType& b, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId ) { unsigned int local_size = RHS_Contribution.size(); for (unsigned int i_local=0; i_local<local_size; i_local++) { unsigned int i_global=EquationId[i_local]; if ( i_global < BaseType::mEquationSystemSize ) //on all DOFS { // ASSEMBLING THE SYSTEM VECTOR b[i_global] += RHS_Contribution[i_local]; } } } //************************************************************************** //************************************************************************** void CalculateReactions( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { //KRATOS_WATCH(b); TSparseSpace::SetToZero(b); //KRATOS_WATCH("Calculating REACTIONSSSSSSSS") //reset the reactions to zero in all the nodes for (typename NodesArrayType::iterator node_iterator =r_model_part.NodesBegin(); node_iterator !=r_model_part.NodesEnd(); ++node_iterator) { node_iterator->FastGetSolutionStepValue(REACTION_X)=0.0; node_iterator->FastGetSolutionStepValue(REACTION_Y)=0.0; node_iterator->FastGetSolutionStepValue(REACTION_Z)=0.0; } //refresh RHS to have the correct reactions BuildRHS(pScheme,r_model_part,b); //KRATOS_WATCH(b) /* for (typename NodesArrayType::iterator node_iterator =r_model_part.NodesBegin(); node_iterator !=r_model_part.NodesEnd(); ++node_iterator) { //not adding thelonely nodes: if( node_iterator->FastGetSolutionStepValue(IS_INTERFACE)==1.0 ) { //we add one because we have to account for the contribution of the node itself unsigned int eq_id=(node_iterator->GetDof(DISPLACEMENT_X)).EquationId(); node_iterator->FastGetSolutionStepValue(REACTION_X)=b[eq_id]; eq_id=(node_iterator->GetDof(DISPLACEMENT_Y)).EquationId(); node_iterator->FastGetSolutionStepValue(REACTION_Y)=b[eq_id]; } } */ //array_1d<double, 3> ReactionsVec; typename DofsArrayType::ptr_iterator it2; for (it2=BaseType::mDofSet.ptr_begin(); it2 != BaseType::mDofSet.ptr_end(); ++it2) { //JUST FOR ONE EXAMPLE - Turek (otherwise the below is correct) if ( (*it2)->IsFixed() ) { unsigned int eq_id=(*it2)->EquationId(); //KRATOS_WATCH(eq_id) 
//KRATOS_WATCH(b[eq_id]) (*it2)->GetSolutionStepReactionValue() = b[eq_id]; //KRATOS_WATCH((*it2)->GetSolutionStepReactionValue()) } // } } //************************************************************************** //************************************************************************** void BuildRHS( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemVectorType& b) { KRATOS_TRY //Getting the Elements ElementsArrayType& pElements = r_model_part.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = r_model_part.Conditions(); #ifndef _OPENMP ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); //resetting to zero the vector of reactions TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) ); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; // assemble all elements for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it) { //calculate elemental Right Hand Side Contribution pScheme->Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleRHS(b,RHS_Contribution,EquationId); } LHS_Contribution.resize(0,0,false); RHS_Contribution.resize(0,false); // assemble all conditions for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it) { //calculate elemental contribution pScheme->Condition_Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleRHS(b,RHS_Contribution,EquationId); } #else //creating an array of lock variables of the size of the system matrix std::vector< omp_lock_t > lock_array(b.size()); int b_size = b.size(); for (int i = 0; i < b_size; i++) omp_init_lock(&lock_array[i]); //create a partition of the element array int number_of_threads = omp_get_max_threads(); vector<unsigned int> element_partition; CreatePartition(number_of_threads, pElements.size(), element_partition); //KRATOS_WATCH(number_of_threads); //KRATOS_WATCH(element_partition); double start_prod = omp_get_wtime(); #pragma omp parallel for for (int k = 0; k < number_of_threads; k++) { //contributions to the system LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k]; typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k + 1]; // assemble all elements for (typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it) { //calculate elemental contribution pScheme->Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleRHS_parallel(b, RHS_Contribution, EquationId, lock_array); /* double aaaa = TSparseSpace::TwoNorm(b); if (TSparseSpace::TwoNorm(b) == aaaa + 1000000000000000000.0) { KRATOS_WATCH((*it)->Id()) KRATOS_THROW_ERROR(std::logic_error, "Something is wrong: fluid element cannot have all 3 nodes at the FSI boundary " , ""); } */ // clean local elemental memory 
pScheme->CleanMemory(*it); // #pragma omp critical // { // //assemble the elemental contribution // AssembleLHS(A,LHS_Contribution,EquationId); // AssembleRHS(b,RHS_Contribution,EquationId); // // // clean local elemental memory // pScheme->CleanMemory(*it); // } } } //KRATOS_WATCH("Finished assembling of builder and solver") vector<unsigned int> condition_partition; CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition); #pragma omp parallel for for (int k = 0; k < number_of_threads; k++) { //contributions to the system LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); Condition::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ConditionsArrayType::ptr_iterator it_begin = ConditionsArray.ptr_begin() + condition_partition[k]; typename ConditionsArrayType::ptr_iterator it_end = ConditionsArray.ptr_begin() + condition_partition[k + 1]; // assemble all elements for (typename ConditionsArrayType::ptr_iterator it = it_begin; it != it_end; ++it) { //calculate elemental contribution pScheme->Condition_Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo); //assemble the elemental contribution AssembleRHS_parallel(b, RHS_Contribution, EquationId, lock_array); // #pragma omp critical // { // //assemble the elemental contribution // AssembleLHS(A,LHS_Contribution,EquationId); // AssembleRHS(b,RHS_Contribution,EquationId); // } } } double stop_prod = omp_get_wtime(); std::cout << "time: " << stop_prod - start_prod << std::endl; for (int i = 0; i < b_size; i++) omp_destroy_lock(&lock_array[i]); //KRATOS_WATCH("finished parallel building"); // //ensure that all the threads are syncronized here // #pragma omp barrier #endif KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void ConstructMatrixStructure( TSystemMatrixType& A, ModelPart& r_model_part ) { KRATOS_TRY //KRATOS_WATCH("Started constructing MAT STRUC") std::vector<int> indices; indices.reserve(1000); //count non zeros int total_nnz = 0; for (typename NodesArrayType::iterator node_iterator =r_model_part.NodesBegin(); node_iterator !=r_model_part.NodesEnd(); ++node_iterator) { //not adding thelonely nodes: if( (node_iterator->GetValue(NEIGHBOUR_NODES)).size() != 0 ) { //we add one because we have to account for the contribution of the node itself total_nnz +=1+(node_iterator->GetValue(NEIGHBOUR_NODES)).size(); } } //reserve space in the matrix A.reserve(total_nnz* TDim * TDim,false); unsigned int row_index; //fill the matrix row by row unsigned int dof_position = r_model_part.NodesBegin()->GetDofPosition(DISPLACEMENT_X); for (typename NodesArrayType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it) { WeakPointerVector< Node<3> >& neighb_nodes = it->GetValue(NEIGHBOUR_NODES); if( neighb_nodes.size() != 0 ) { //first row in the block row_index = it->GetDof(DISPLACEMENT_X,dof_position).EquationId(); //add id of the current node //NOTE: here and in the following we ASSUME that the ids of DISPLACEMENT_X _Y and _Z are sequential for(unsigned int kk = 0; kk<TDim; kk++) { indices.push_back(row_index + kk); } //filling and order the first neighbours list for( WeakPointerVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++) { unsigned int tmp = (i->GetDof(DISPLACEMENT_X,dof_position)).EquationId(); for(unsigned int kk = 0; kk<TDim; kk++) { 
indices.push_back(tmp + kk); } } std::sort(indices.begin(),indices.end()); //fill in the system matrix A for(unsigned int kk = 0; kk<TDim; kk++) { for(unsigned int j=0; j<indices.size(); j++) { A.push_back(row_index + kk,indices[j] , 0.00); } } //clean the indices (it is a work array) indices.erase(indices.begin(),indices.end()); } } //KRATOS_WATCH("FINISHED constructing MAT STRUC") KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void ConstructMatrixStructure_Mconsistent( TSystemMatrixType& Mconsistent, ModelPart& r_model_part) { KRATOS_TRY //KRATOS_WATCH("Started constructing MAT STRUC M CONSISTENT") std::vector<int> indices; indices.reserve(1000); //KRATOS_WATCH("contruct matrix structure Mconsistent 0") int total_nnz = 0; for (typename NodesArrayType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it) { //not to do include lonely nodes in the system if( (it->GetValue(NEIGHBOUR_NODES)).size() != 0 ) { //we add one because we have to account for the contribution of the node itself total_nnz += 1+(it->GetValue(NEIGHBOUR_NODES)).size(); } } Mconsistent.reserve(total_nnz,false); unsigned int row_index; //fill the matrix row by row unsigned int dof_position = r_model_part.NodesBegin()->GetDofPosition(DISPLACEMENT_X); for (typename NodesArrayType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it) { WeakPointerVector< Node<3> >& neighb_nodes = it->GetValue(NEIGHBOUR_NODES); if( neighb_nodes.size() != 0 ) { //first row in the block row_index = it->GetDof(DISPLACEMENT_X,dof_position).EquationId(); //add id of the current node //NOTE: here and in the following we ASSUME that the ids of DISPLACEMENT_X _Y and _Z are sequential //we store in the array of indices the column numbers of the pressure index of the respective node, which coincides //with the index of DISP_X, divided by TDim (pressure is scalar - no need to store 2 more indices, as it was in //the case of vector (displ) //CHECK THIS!!! 
//indices.push_back(row_index/3.0); indices.push_back(row_index/TDim); //filling and order the first neighbours list for( WeakPointerVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++) { unsigned int tmp = (i->GetDof(DISPLACEMENT_X,dof_position)).EquationId(); indices.push_back(tmp/TDim); } std::sort(indices.begin(),indices.end()); //fill M (the consistent mass matrix)-note that the "pressure index" is assumed to concide the DISPLACEMENT_X index divided by 3 for(unsigned int j=0; j<indices.size(); j++) { Mconsistent.push_back(row_index/TDim, indices[j] , 0.00); //KRATOS_WATCH(Mconsistent) } //clean the indices (it is a work array) indices.erase(indices.begin(),indices.end()); } } //KRATOS_WATCH("FInished constructing MAT STRUC M CONSISTENT") KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void ConstructMatrixStructure_DivergenceMatrixD( TSystemMatrixType& mD, ModelPart& r_model_part) { KRATOS_TRY //KRATOS_WATCH("Started constructing MAT STRUC Divergence Matrix") std::vector<int> indices; indices.reserve(1000); //count non zeros int total_nnz = 0; for (typename NodesContainerType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it) { //not to add lonely nodes to the system if( (it->GetValue(NEIGHBOUR_NODES)).size() != 0 ) { //we add one because we have to account for the contribution of the node itself total_nnz += 1 + (it->GetValue(NEIGHBOUR_NODES)).size(); } } mD.reserve(total_nnz* TDim,false); unsigned int row_index; //fill the matrix row by row unsigned int dof_position = r_model_part.NodesBegin()->GetDofPosition(DISPLACEMENT_X); for (typename NodesArrayType::iterator it=r_model_part.NodesBegin(); it!=r_model_part.NodesEnd(); ++it) { WeakPointerVector< Node<3> >& neighb_nodes = it->GetValue(NEIGHBOUR_NODES); if( neighb_nodes.size() != 0 ) { //first row in the block row_index = it->GetDof(DISPLACEMENT_X,dof_position).EquationId(); //add id of the current node //NOTE: here and in the following we ASSUME that the ids of DISPLACEMENT_X _Y and _Z are sequential for(unsigned int kk = 0; kk<TDim; kk++) { indices.push_back(row_index + kk); } //filling and order the first neighbours list for( WeakPointerVector< Node<3> >::iterator i = neighb_nodes.begin(); i != neighb_nodes.end(); i++) { unsigned int tmp = (i->GetDof(DISPLACEMENT_X,dof_position)).EquationId(); for(unsigned int kk = 0; kk<TDim; kk++) { indices.push_back(tmp + kk); } } std::sort(indices.begin(),indices.end()); //fill D (the divergence matrix) - note that the "pressure index" is assumed to concide the DISPLACEMENT_X index divided by 3 for(unsigned int j=0; j<indices.size(); j++) { mD.push_back(row_index/TDim, indices[j] , 0.00); } //clean the indices (it is a work array) indices.erase(indices.begin(),indices.end()); } } //KRATOS_WATCH("FSI D") //KRATOS_WATCH(mD) //KRATOS_WATCH("Finished constructing MAT STRUC Divergence Matrix") KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void BuildAuxiliaries( TSystemMatrixType& mD,TSystemMatrixType& Mconsistent, TSystemVectorType& mMdiagInv, ModelPart& r_model_part) { KRATOS_TRY //KRATOS_WATCH("BUILDING AUXILIARY MATRIX D") //array_1d<double,TDim+1> rhs_contribution; #ifndef _OPENMP // BoundedMatrix::BoundedMatrix<double,TDim+1,TDim> DN_DX; BoundedMatrix<double,TDim+1,TDim> DN_DX; 
array_1d<double,TDim+1> N; array_1d<unsigned int ,TDim+1> local_indices; double Volume; double temp; //getting the dof position unsigned int dof_position = (r_model_part.NodesBegin())->GetDofPosition(DISPLACEMENT_X); double aaa = 1.0/(TDim+1.0); //if the element is not having all the nodes IS_STRUCTURE, assemble it, otherwise do nothing for(ModelPart::ElementsContainerType::iterator i = r_model_part.ElementsBegin(); i!=r_model_part.ElementsEnd(); i++) { Geometry< Node<3> >& geom = i->GetGeometry(); //counting the n-r of structure nodes unsigned int str_nr=0; //for (int k = 0;k<TDim+1;k++) for (unsigned int k = 0; k<geom.size(); k++) { str_nr+=(unsigned int)(i->GetGeometry()[k].FastGetSolutionStepValue(IS_STRUCTURE)); } /////////////////////////////////////////////////////////////////////////////////////////////// //if the element is not having all the nodes IS_STRUCTURE, assemble it, otherwise do nothing // that means, that the entries corresponding to the structural elements are zero /////////////////////////////////////////////////////////////////////////////////////////////// if (geom.size()!=str_nr) { GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume); if (Volume<0) Volume*=-1.0; //finiding local indices //for(int ii = 0; ii<TDim+1; ii++) for(unsigned int ii = 0; ii<geom.size(); ii++) { local_indices[ii] = geom[ii].GetDof(DISPLACEMENT_X,dof_position).EquationId(); } //building matrix D (transpose of the gradient integrated by parts) temp = Volume*aaa; for(unsigned int row = 0; row<TDim+1; row++) { unsigned int row_index = local_indices[row] / (TDim); //ATTENTION! here i am doing a dangerous op //KRATOS_WATCH(row_index) //first write the lumped mass matrix mMdiagInv[row_index] += temp; for(unsigned int col = 0; col<TDim+1; col++) { for(unsigned int kkk = 0; kkk<TDim; kkk++) { //check if the below is correct (copied it from Mass matrix) unsigned int col_index = local_indices[col]+kkk; //unsigned int col_index = col + kkk; //FIRST THE DIVERGENCE MATRIX mD(row_index,col_index) += temp * DN_DX(col,kkk); //And now the consistent mass matrix if (row_index==col_index) { //Mconsistent(row_index,col_index) += temp * 2.0; if (TDim==2) Mconsistent(row_index,col_index) += 0.25*temp * 2.0; else if (TDim==3) Mconsistent(row_index,col_index) += 0.2*temp * 2.0*2.5; } else { //Mconsistent(row_index,col_index) += temp ; if (TDim==2) Mconsistent(row_index,col_index) += 0.25*temp ; else if (TDim==3) Mconsistent(row_index,col_index) += 0.2*temp*0.0 ; } } } } } } #else //creating an array of lock variables of the size of the system matrix std::vector< omp_lock_t > lock_array(mD.size1()); int D_size = mD.size1(); for (int i = 0; i < D_size; i++) omp_init_lock(&lock_array[i]); //create a partition of the element array int number_of_threads = omp_get_max_threads(); vector<unsigned int> element_partition; CreatePartition(number_of_threads, r_model_part.Elements().size(), element_partition); //KRATOS_WATCH(number_of_threads); //KRATOS_WATCH(element_partition); double start_prod = omp_get_wtime(); //#pragma omp parallel for private (DN_DX, N, local_indices, Volume, temp, aaa, dof_position) #pragma omp parallel for for (int k = 0; k < number_of_threads; k++) { BoundedMatrix<double,TDim+1,TDim> DN_DX; array_1d<double,TDim+1> N; array_1d<unsigned int ,TDim+1> local_indices; //array_1d<double,TDim+1> rhs_contribution; double Volume; double temp; //getting the dof position unsigned int dof_position = (r_model_part.NodesBegin())->GetDofPosition(DISPLACEMENT_X); double aaa = 1.0/(TDim+1.0); 
//Element::EquationIdVectorType EquationId; //ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ElementsArrayType::ptr_iterator it_begin = r_model_part.Elements().ptr_begin() + element_partition[k]; typename ElementsArrayType::ptr_iterator it_end = r_model_part.Elements().ptr_begin() + element_partition[k + 1]; // assemble all elements for (typename ElementsArrayType::ptr_iterator i = it_begin; i != it_end; ++i) { Geometry< Node<3> >& geom = (*i)->GetGeometry(); //counting the n-r of structure nodes unsigned int str_nr=0; //for (int k = 0;k<TDim+1;k++) for (unsigned int k = 0; k<geom.size(); k++) { str_nr+=(unsigned int)((*i)->GetGeometry()[k].FastGetSolutionStepValue(IS_STRUCTURE)); } /////////////////////////////////////////////////////////////////////////////////////////////// //if the element is not having all the nodes IS_STRUCTURE, assemble it, otherwise do nothing // that means, that the entries corresponding to the structural elements are zero /////////////////////////////////////////////////////////////////////////////////////////////// if (geom.size()!=str_nr) { GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume); if (Volume<0) Volume*=-1.0; //finiding local indices //for(int ii = 0; ii<TDim+1; ii++) for(unsigned int ii = 0; ii<geom.size(); ii++) { local_indices[ii] = geom[ii].GetDof(DISPLACEMENT_X,dof_position).EquationId(); } //building matrix D (transpose of the gradient integrated by parts) temp = Volume*aaa; for(unsigned int row = 0; row<TDim+1; row++) { unsigned int row_index = local_indices[row] / (TDim); //ATTENTION! here i am doing a dangerous op mMdiagInv[row_index] += temp; omp_set_lock(&lock_array[row_index]); //first write the lumped mass matrix //KRATOS_WATCH(row_index) for(unsigned int col = 0; col<TDim+1; col++) { unsigned int col_index = local_indices[col] /(TDim); if (row_index==col_index) { //Mconsistent(row_index,col_index) += temp * 2.0; if (TDim==2) Mconsistent(row_index,col_index) += 0.25*temp * 2.0; else if (TDim==3) Mconsistent(row_index,col_index) += 0.2*temp * 2.0; } else { //Mconsistent(row_index,col_index) += temp ; if (TDim==2) Mconsistent(row_index,col_index) += 0.25*temp ; else if (TDim==3) Mconsistent(row_index,col_index) += 0.2*temp ; } for(unsigned int kkk = 0; kkk<TDim; kkk++) { //check if the below is correct (copied it from Mass matrix) unsigned int col_index = local_indices[col]+kkk; //unsigned int col_index = col + kkk; //FIRST THE DIVERGENCE MATRIX mD(row_index,col_index) += temp * DN_DX(col,kkk); //And now the consistent mass matrix } } omp_unset_lock(&lock_array[row_index]); } } } } double stop_prod = omp_get_wtime(); std::cout << "time: " << stop_prod - start_prod << std::endl; for (int i = 0; i < D_size; i++) omp_destroy_lock(&lock_array[i]); #endif //this will be done sequentially in any case //inverting the lumped mass matrix for(unsigned int i = 0; i<TSparseSpace::Size(mMdiagInv); i++) { if (mMdiagInv[i]>1e-26) mMdiagInv[i] = 1.0/mMdiagInv[i]; else //if (mMdiagInv[i]==0.0) { //KRATOS_WATCH(mMdiagInv[i]) //KRATOS_THROW_ERROR(std::logic_error,"something is wrong with the mass matrix entry - ZERO!!!","") mMdiagInv[i] = 1000000000000.0; //KRATOS_WATCH(mMdiagInv[i]) //KRATOS_THROW_ERROR(std::logic_error,"Zero ELEMENT VOLUMEE!!!!!!!!!!!!!!","") //mMdiagInv[i] = 0.0; } } //KRATOS_WATCH("FINISHED BUILDING AUXILIARY MATRIX D") KRATOS_CATCH (" ") } /* void BuildAuxiliaries( TSystemMatrixType& mD, ModelPart& r_model_part) { KRATOS_TRY //KRATOS_WATCH("BUILDING AUXILIARY MATRIX D") 
boost::numeric::ublas::bounded_matrix<double,TDim+1,TDim> DN_DX; array_1d<double,TDim+1> N; array_1d<unsigned int ,TDim+1> local_indices; //array_1d<double,TDim+1> rhs_contribution; double Volume; double temp; //getting the dof position unsigned int dof_position = (r_model_part.NodesBegin())->GetDofPosition(DISPLACEMENT_X); double aaa = 1.0/(TDim+1.0); #ifndef _OPENMP //if the element is not having all the nodes IS_STRUCTURE, assemble it, otherwise do nothing for(ModelPart::ElementsContainerType::iterator i = r_model_part.ElementsBegin(); i!=r_model_part.ElementsEnd(); i++) { Geometry< Node<3> >& geom = i->GetGeometry(); //counting the n-r of structure nodes unsigned int str_nr=0; //for (int k = 0;k<TDim+1;k++) for (unsigned int k = 0;k<geom.size();k++) { str_nr+=(unsigned int)(i->GetGeometry()[k].FastGetSolutionStepValue(IS_STRUCTURE)); } /////////////////////////////////////////////////////////////////////////////////////////////// //if the element is not having all the nodes IS_STRUCTURE, assemble it, otherwise do nothing // that means, that the entries corresponding to the structural elements are zero /////////////////////////////////////////////////////////////////////////////////////////////// if (geom.size()!=str_nr) { GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume); if (Volume<0) Volume*=-1.0; //finiding local indices //for(int ii = 0; ii<TDim+1; ii++) for(unsigned int ii = 0; ii<geom.size(); ii++) { local_indices[ii] = geom[ii].GetDof(DISPLACEMENT_X,dof_position).EquationId(); } //building matrix D (transpose of the gradient integrated by parts) temp = Volume*aaa; for(unsigned int row = 0; row<TDim+1; row++) { unsigned int row_index = local_indices[row] / (TDim); //ATTENTION! here i am doing a dangerous op //KRATOS_WATCH(row_index) for(unsigned int col = 0; col<TDim+1; col++) { for(unsigned int kkk = 0; kkk<TDim; kkk++) { //check if the below is correct (copied it from Mass matrix) unsigned int col_index = local_indices[col]+kkk; //unsigned int col_index = col + kkk; mD(row_index,col_index) += temp * DN_DX(col,kkk); } } } } } #else //creating an array of lock variables of the size of the system matrix std::vector< omp_lock_t > lock_array(mD.size1()); int D_size = mD.size1(); for (int i = 0; i < D_size; i++) omp_init_lock(&lock_array[i]); //create a partition of the element array int number_of_threads = omp_get_max_threads(); vector<unsigned int> element_partition; CreatePartition(number_of_threads, r_model_part.Elements().size(), element_partition); KRATOS_WATCH(number_of_threads); KRATOS_WATCH(element_partition); double start_prod = omp_get_wtime(); //#pragma omp parallel for private (DN_DX, N, local_indices, Volume, temp, aaa, dof_position) #pragma omp parallel for for (int k = 0; k < number_of_threads; k++) { boost::numeric::ublas::bounded_matrix<double,TDim+1,TDim> DN_DX; array_1d<double,TDim+1> N; array_1d<unsigned int ,TDim+1> local_indices; //array_1d<double,TDim+1> rhs_contribution; double Volume; double temp; //getting the dof position unsigned int dof_position = (r_model_part.NodesBegin())->GetDofPosition(DISPLACEMENT_X); double aaa = 1.0/(TDim+1.0); //Element::EquationIdVectorType EquationId; //ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ElementsArrayType::ptr_iterator it_begin = r_model_part.Elements().ptr_begin() + element_partition[k]; typename ElementsArrayType::ptr_iterator it_end = r_model_part.Elements().ptr_begin() + element_partition[k + 1]; // assemble all elements for (typename ElementsArrayType::ptr_iterator i = 
it_begin; i != it_end; ++i) { Geometry< Node<3> >& geom = (*i)->GetGeometry(); //counting the n-r of structure nodes unsigned int str_nr=0; //for (int k = 0;k<TDim+1;k++) for (unsigned int k = 0;k<geom.size();k++) { str_nr+=(unsigned int)((*i)->GetGeometry()[k].FastGetSolutionStepValue(IS_STRUCTURE)); } /////////////////////////////////////////////////////////////////////////////////////////////// //if the element is not having all the nodes IS_STRUCTURE, assemble it, otherwise do nothing // that means, that the entries corresponding to the structural elements are zero /////////////////////////////////////////////////////////////////////////////////////////////// if (geom.size()!=str_nr) { GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume); if (Volume<0) Volume*=-1.0; //finiding local indices //for(int ii = 0; ii<TDim+1; ii++) for(unsigned int ii = 0; ii<geom.size(); ii++) { local_indices[ii] = geom[ii].GetDof(DISPLACEMENT_X,dof_position).EquationId(); } //building matrix D (transpose of the gradient integrated by parts) temp = Volume*aaa; for(unsigned int row = 0; row<TDim+1; row++) { unsigned int row_index = local_indices[row] / (TDim); //ATTENTION! here i am doing a dangerous op omp_set_lock(&lock_array[row_index]); //KRATOS_WATCH(row_index) for(unsigned int col = 0; col<TDim+1; col++) { for(unsigned int kkk = 0; kkk<TDim; kkk++) { //check if the below is correct (copied it from Mass matrix) unsigned int col_index = local_indices[col]+kkk; //unsigned int col_index = col + kkk; mD(row_index,col_index) += temp * DN_DX(col,kkk); } } omp_unset_lock(&lock_array[row_index]); } } } } double stop_prod = omp_get_wtime(); std::cout << "time: " << stop_prod - start_prod << std::endl; for (int i = 0; i < D_size; i++) omp_destroy_lock(&lock_array[i]); #endif //KRATOS_WATCH("FINISHED BUILDING AUXILIARY MATRIX D") KRATOS_CATCH (" ") } */ //************************************************************************** //************************************************************************** // //assembles consistent and lumped mass matrices void AssembleMassMatrices(TSystemMatrixType& Mconsistent, TSystemVectorType& mMdiagInv, ModelPart& r_model_part) { //first we assemble the diagonal mass matrix KRATOS_TRY //KRATOS_WATCH("BUILDING MASS MATRICES ") BoundedMatrix<double,TDim+1,TDim> DN_DX; array_1d<double,TDim+1> N; array_1d<unsigned int ,TDim+1> local_indices; //array_1d<double,TDim+1> rhs_contribution; double Volume; double temp; //getting the dof position unsigned int dof_position = (r_model_part.NodesBegin())->GetDofPosition(DISPLACEMENT_X); double aaa = 1.0/(TDim+1.0); for(ModelPart::ElementsContainerType::iterator i = r_model_part.ElementsBegin(); i!=r_model_part.ElementsEnd(); i++) { Geometry< Node<3> >& geom = i->GetGeometry(); //counting number of structural nodes unsigned int str_nr=0; //for (int k = 0;k<TDim+1;k++) for (unsigned int k = 0; k<geom.size(); k++) { str_nr+=int(i->GetGeometry()[k].FastGetSolutionStepValue(IS_STRUCTURE)); } //we do not do anything for the elements of the structure (all nodes are IS_STR) if (geom.size()!=str_nr) { GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume); if (Volume<0) Volume*=-1.0; //finiding local indices //for(int ii = 0; ii<TDim+1; ii++) for(unsigned int ii = 0; ii<geom.size(); ii++) { local_indices[ii] = geom[ii].GetDof(DISPLACEMENT_X,dof_position).EquationId(); } temp = Volume*aaa; for(unsigned int row = 0; row<TDim+1; row++) { unsigned int row_index = local_indices[row] / (TDim); mMdiagInv[row_index] += temp; } } } 
//KRATOS_WATCH(mMdiagInv) //inverting the mass matrix for(unsigned int i = 0; i<TSparseSpace::Size(mMdiagInv); i++) { if (mMdiagInv[i]>1e-26) mMdiagInv[i] = 1.0/mMdiagInv[i]; else //if (mMdiagInv[i]==0.0) { //KRATOS_WATCH(mMdiagInv[i]) //KRATOS_THROW_ERROR(std::logic_error,"something is wrong with the mass matrix entry - ZERO!!!","") mMdiagInv[i] = 1000000000000.0; //KRATOS_WATCH(mMdiagInv[i]) //KRATOS_THROW_ERROR(std::logic_error,"Zero ELEMENT VOLUMEE!!!!!!!!!!!!!!","") //mMdiagInv[i] = 0.0; } } //KRATOS_WATCH(mMdiagInv) //AND NOW WE BUILD THE CONSISTENT MASS MATRIX for(ModelPart::ElementsContainerType::iterator i = r_model_part.ElementsBegin(); i!=r_model_part.ElementsEnd(); i++) { Geometry< Node<3> >& geom = i->GetGeometry(); unsigned int str_nr=0; for (unsigned int k = 0; k<i->GetGeometry().size(); k++) { str_nr+=(unsigned int)(i->GetGeometry()[k].FastGetSolutionStepValue(IS_STRUCTURE)); } if (geom.size()!=str_nr) { GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume); if (Volume<0) Volume*=-1.0; //finiding local indices //for(int ii = 0; ii<TDim+1; ii++) for(unsigned int ii = 0; ii<geom.size(); ii++) { local_indices[ii] = geom[ii].GetDof(DISPLACEMENT_X,dof_position).EquationId(); } temp = Volume*aaa; //element mass matrix has a shape: // 2 1 1 // A/12.0* 1 2 1 in 2D // 1 1 2 // // and // // 2 1 1 1 // V/20.0* 1 2 1 1 in 3D // 1 1 2 1 // 1 1 1 2 //nothing should be added in case of membrane for(unsigned int row = 0; row<TDim+1; row++) { unsigned int row_index = local_indices[row] / (TDim); //pressure is a scalar=>matrix size is Tdim times smaller than for vector for(unsigned int col = 0; col<TDim+1; col++) { unsigned int col_index = local_indices[col] /(TDim); if (row_index==col_index) { //Mconsistent(row_index,col_index) += temp * 2.0; if (TDim==2) Mconsistent(row_index,col_index) += 0.25*temp * 2.0; else if (TDim==3) Mconsistent(row_index,col_index) += 0.2*temp * 2.0; } else { //Mconsistent(row_index,col_index) += temp ; if (TDim==2) Mconsistent(row_index,col_index) += 0.25*temp ; else if (TDim==3) Mconsistent(row_index,col_index) += 0.2*temp; } } } } } // KRATOS_WATCH("FINISHED BUILDING MASS MATRICES ") KRATOS_CATCH("") } //output += trans(input)*input // void calc_prod_precond_vec( TSystemVectorType& vec, TSystemVectorType& precond, TSystemVectorType& result) { KRATOS_TRY if ( precond.size()!=vec.size() ) KRATOS_THROW_ERROR(std::logic_error,"preconditioner size is wrong","") if ( precond.size()!=result.size() ) KRATOS_THROW_ERROR(std::logic_error,"preconditioner size is wrong","") TSparseSpace::SetToZero(result); //typedef unsigned int size_type; //typedef double value_type; //KRATOS_WATCH(precond) #pragma omp parallel for for (int i=0; i<static_cast<int>(precond.size()); i++) { result[i]=precond[i]*vec[i]; } KRATOS_CATCH(""); } void calc_GMinvD_prod(TSystemMatrixType& mD, TSystemVectorType& Minv, TSystemVectorType& x, TSystemVectorType& WorkArray, TSystemVectorType& destination) { KRATOS_TRY //KRATOS_WATCH("COMPUTING GM-1D") //typedef unsigned int size_type; //typedef double value_type; TSparseSpace::SetToZero(WorkArray); //TSparseSpace::SetToZero(destination); //WorkArray = D * x TSparseSpace::Mult(mD, x, WorkArray); //KRATOS_WATCH(WorkArray) //WorkArray = Minv * WorkArray #pragma omp parallel for for(int i=0; i<static_cast<int>(WorkArray.size()); i++) { WorkArray[i] *= Minv[i]; } //destination = trans(D) * WorkArray //TSparseSpace::TransposeMult(D, x, WorkArray); TSparseSpace::TransposeMult(mD, WorkArray, destination); //KRATOS_WATCH(destination) //KRATOS_WATCH("FINISHED 
COMPUTING GM-1D") KRATOS_CATCH(""); } //************************************************************************************************* //************************************************************************************************* void ReturnDx( TSystemVectorType& Dx, TSystemVectorType& xi) { KRATOS_TRY //TSparseSpace::SetToZero(Dx); if ( Dx.size()!=xi.size() ) KRATOS_THROW_ERROR(std::logic_error,"Dx and xi sizes mismatch","") Dx=xi; KRATOS_CATCH(""); } //************************************************************************************************* //************************************************************************************************* void CalculatePreconditionerDiagonalMatrix(const TSystemMatrixType& D, const TSystemVectorType& Minv, const TSystemMatrixType& A, TSystemVectorType& preconditioner) { KRATOS_TRY //KRATOS_WATCH("COMPUTING preconditioner") typedef unsigned int size_type; typedef double value_type; TSparseSpace::SetToZero(preconditioner); if ( preconditioner.size()!=A.size1() ) KRATOS_THROW_ERROR(std::logic_error,"preconditioner size is wrong","") //get diagonal of matrix A for(unsigned int i = 0; i<A.size1(); i++) { preconditioner[i] = A(i,i); } //TSparseSpace::SetToZero(preconditioner); //calculate and add diagonal of G*Minv*D //using that G*Minv*D(i,i) = D_k for (size_type k = 0; k < D.size1 (); ++ k) { size_type begin = D.index1_data () [k]; size_type end = D.index1_data () [k + 1]; for (size_type i = begin; i < end; ++ i) { unsigned int index_i = D.index2_data () [i]; value_type data_i = D.value_data()[i]; preconditioner[index_i] += Minv[k]*data_i*data_i; } } //KRATOS_WATCH(preconditioner) //invert the preconditioner matrix for(unsigned int i = 0; i<A.size1(); i++) { if (fabs(preconditioner[i])>1e-26) //preconditioner[i] = 1.00/preconditioner[i]; preconditioner[i] = 1.00/preconditioner[i]; else preconditioner[i] = 1000000000000000000.0; if (preconditioner[i]<0.0) preconditioner[i]*=-10000000000000000000.0; //preconditioner[i]*=1000000000000000000.0; /* if (preconditioner[i]<0.0) { //preconditioner[i]=1.0; KRATOS_THROW_ERROR(std::logic_error,"NEGATIVE PRECONDITIONER","") } */ } //KRATOS_WATCH("Finished COMPUTING preconditioner") KRATOS_CATCH(""); } //************************************************************************************************* //************************************************************************************************* bool ConvergenceCheck (TSystemVectorType& residual, TSystemVectorType& b, const double& tolerance, const int& iter_number, const int& max_iter_number) { //const DataType abs_toll = DataType(1e-15); // //absolute tolerance = 1e-15 // if (iter_number>max_iter_number) KRATOS_THROW_ERROR(std::logic_error,"MAX NUMBER OF ITERATIONS EXCEEDED, UR CG DIDNT CONVERGE","") if (TSparseSpace::TwoNorm(residual)<1e-15) return true; else { const double& ratio = TSparseSpace::TwoNorm(residual)/TSparseSpace::TwoNorm(b); //KRATOS_WATCH(ratio) return( (ratio) < tolerance); } } //************************************************************************************************* //************************************************************************************************* void ModifyForDirichlet (TSystemMatrixType& A, TSystemVectorType& b) { KRATOS_TRY double large_number = 1e20; for(typename DofsArrayType::iterator i_dof = BaseType::mDofSet.begin() ; i_dof != BaseType::mDofSet.end() ; ++i_dof) { if(i_dof->IsFixed() == true) { unsigned int eq_id = i_dof->EquationId(); A(eq_id,eq_id) += large_number; //b[eq_id] = 0.0001; } } 
KRATOS_CATCH(""); } void CalculateNodalPressureForce (TSystemMatrixType& mD,TSystemVectorType& mMdiagInv,ModelPart& r_model_part) { KRATOS_TRY int i=0; unsigned int dof_position = (r_model_part.NodesBegin())->GetDofPosition(DISPLACEMENT_X); const int size = TSparseSpace::Size(mMdiagInv); TSystemVectorType p(size); TSystemVectorType f_p(3*size); i=0; for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in) { if( (in->GetValue(NEIGHBOUR_NODES)).size() != 0)// && in->FastGetSolutionStepValue(IS_FLUID)==1.0) { i=in->GetDof(DISPLACEMENT_X,dof_position).EquationId()/TDim; p[i]=in->FastGetSolutionStepValue(PRESSURE); } } TSparseSpace::TransposeMult(mD, p, f_p); for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in) { if( (in->GetValue(NEIGHBOUR_NODES)).size() != 0)// && in->FastGetSolutionStepValue(IS_FLUID)==1.0) { in->FastGetSolutionStepValue(FORCE_X)=f_p[in->GetDof(DISPLACEMENT_X,dof_position).EquationId()]; in->FastGetSolutionStepValue(FORCE_Y)=f_p[in->GetDof(DISPLACEMENT_Y,dof_position).EquationId()]; in->FastGetSolutionStepValue(FORCE_Z)=f_p[in->GetDof(DISPLACEMENT_Z,dof_position).EquationId()]; } } KRATOS_CATCH(""); } void ComputePressureAtFreeSurface (ModelPart& r_model_part, double bulk_modulus, double density) { for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in) { if( (in->GetValue(NEIGHBOUR_NODES)).size() != 0)// && in->FastGetSolutionStepValue(IS_FLUID)==1.0) { if (in->FastGetSolutionStepValue(IS_FLUID)==1.0 && in->FastGetSolutionStepValue(IS_FREE_SURFACE)==1.0) { //KRATOS_WATCH("Computing pressure at a free surface node") in->FastGetSolutionStepValue(PRESSURE)=bulk_modulus*density*(in->FastGetSolutionStepValue(NODAL_AREA) - in->FastGetSolutionStepValue(NODAL_AREA,1))/(in->FastGetSolutionStepValue(NODAL_AREA)); //=in->FastGetSolutionStepValue(PRESSURE,1)+bulk_modulus*density*(in->FastGetSolutionStepValue(NODAL_AREA) - in->FastGetSolutionStepValue(NODAL_AREA,1))/(in->FastGetSolutionStepValue(NODAL_AREA)); } } } } /////////////////////////////////////////////////////////////////////////// /* void CalculateLupmedMass(ModelPart& model_part) { KRATOS_TRY double dummy=0.0; ProcessInfo& proc_info = model_part.GetProcessInfo(); for (typename ModelPart::ElementsContainerType::iterator im=model_part.ElementsBegin(); im!=model_part.ElementsEnd(); ++im) { im->Calculate(NODAL_MASS, dummy, proc_info); } KRATOS_CATCH(""); } */ void SavePressureIteration(ModelPart& model_part) { KRATOS_TRY double pres=0.0; for (typename ModelPart::NodesContainerType::iterator it=model_part.NodesBegin(); it!=model_part.NodesEnd(); ++it) { pres=it->FastGetSolutionStepValue(PRESSURE); it->FastGetSolutionStepValue(PRESSURE_OLD_IT)=pres; } KRATOS_CATCH(""); } ///////////////// this is a function for performing the projection step of the ULF-FRAC method void FractionalStepProjection(ModelPart& model_part, double alpha_bossak) { KRATOS_TRY // double aaa=0.0; double dt = model_part.GetProcessInfo()[DELTA_TIME]; BoundedMatrix<double,3,2> DN_DX; array_1d<double,3> N; array_1d<double,3> aux0, aux1, aux2; //this are sized to 3 even in 2D!! 
//reset the auxilliary vector for (typename ModelPart::NodesContainerType::iterator it=model_part.NodesBegin(); it!=model_part.NodesEnd(); ++it) { it->FastGetSolutionStepValue(VAUX)=ZeroVector(3); } //calculate the velocity correction and store it in VAUX for (typename ModelPart::ElementsContainerType::iterator im=model_part.ElementsBegin(); im!=model_part.ElementsEnd(); ++im) { //get the list of nodes of the element Geometry< Node<3> >& geom = im->GetGeometry(); double volume; GeometryUtils::CalculateGeometryData(geom, DN_DX, N, volume); array_1d<double,3> pres_inc; //pres_inc[0] = geom[0].FastGetSolutionStepValue(PRESSURE,1)-geom[0].FastGetSolutionStepValue(PRESSURE); //pres_inc[1] = geom[1].FastGetSolutionStepValue(PRESSURE,1)-geom[1].FastGetSolutionStepValue(PRESSURE); //pres_inc[2] = geom[2].FastGetSolutionStepValue(PRESSURE,1)-geom[2].FastGetSolutionStepValue(PRESSURE); pres_inc[0] = geom[0].FastGetSolutionStepValue(PRESSURE_OLD_IT)-geom[0].FastGetSolutionStepValue(PRESSURE); pres_inc[1] = geom[1].FastGetSolutionStepValue(PRESSURE_OLD_IT)-geom[1].FastGetSolutionStepValue(PRESSURE); pres_inc[2] = geom[2].FastGetSolutionStepValue(PRESSURE_OLD_IT)-geom[2].FastGetSolutionStepValue(PRESSURE); //KRATOS_WATCH(pres_inc[0]) //KRATOS_WATCH(pres_inc[1]) //KRATOS_WATCH(pres_inc[2]) //Riccardo's modification: multiply the G(p_n+1-p_n) by 1/2 //pres_inc*=0.5; //Gradient operator G: BoundedMatrix<double,6,2> shape_func = ZeroMatrix(6, 2); BoundedMatrix<double,6,3> G = ZeroMatrix(6,3); for (int ii = 0; ii< 3; ii++) { int column = ii*2; shape_func(column,0) = N[ii]; shape_func(column + 1, 1) = shape_func(column,0); } noalias(G)=prod(shape_func, trans(DN_DX)); G*=volume; array_1d<double,6> aaa; noalias(aaa) = prod(G,pres_inc); array_1d<double,3> aux; aux[0]=aaa[0]; aux[1]=aaa[1]; //z-component is zero aux[2]=0.0; geom[0].FastGetSolutionStepValue(VAUX) += aux; //reusing aux for the second node aux[0]=aaa[2]; aux[1]=aaa[3]; //z-component is zero geom[1].FastGetSolutionStepValue(VAUX) += aux; //reusing aux for the third node aux[0]=aaa[4]; aux[1]=aaa[5]; geom[2].FastGetSolutionStepValue(VAUX) += aux; } //double beta_newm=0.25*(1.0-alpha_bossak)*(1.0-alpha_bossak); alpha_bossak=-0.3; double coef=0.25*(1.0-alpha_bossak); //double beta_newm=coef*(1.0-alpha_bossak); for (typename ModelPart::NodesContainerType::iterator it=model_part.NodesBegin(); it!=model_part.NodesEnd(); ++it) { if( (it->GetValue(NEIGHBOUR_NODES)).size() != 0) { //VELOCITY = VELOCITY + dt * Minv * VAUX if (it->FastGetSolutionStepValue(NODAL_MASS)>0.0000000001) //KRATOS_THROW_ERROR(std::logic_error, "You have not computed the nodal mass!", ""); { double dt_sq_Minv =coef*dt*dt / it->FastGetSolutionStepValue(NODAL_MASS); array_1d<double,3>& temp = it->FastGetSolutionStepValue(VAUX); if(!it->IsFixed(DISPLACEMENT_X)) { it->FastGetSolutionStepValue(DISPLACEMENT_X)+=dt_sq_Minv*temp[0]; } if(!it->IsFixed(DISPLACEMENT_Y)) { it->FastGetSolutionStepValue(DISPLACEMENT_Y)+=dt_sq_Minv*temp[1]; } } } } KRATOS_CATCH(""); } //////////////////////////////////////////////////////////////////////////////////////////////////////// void UpdateAfterProjection( ModelPart& model_part, double alpha_bossak) { KRATOS_TRY //updating time derivatives (nodally for efficiency) double dt = model_part.GetProcessInfo()[DELTA_TIME]; array_1d<double,3> DeltaDisp; double beta_newmark = 0.25*pow((1.00-alpha_bossak),2); double gamma_newmark = 0.5-alpha_bossak; /* ma0 = 1.0/(mBetaNewmark*pow(DeltaTime,2)); ma1 = mGammaNewmark / (mBetaNewmark*DeltaTime); ma2 = 
1.0/(mBetaNewmark*DeltaTime); ma3 = 1.0/(2.0*mBetaNewmark) - 1.0; ma4 = mGammaNewmark/mBetaNewmark - 1.0; */ double ma0=1.0/(beta_newmark*pow(dt,2)); double ma1=gamma_newmark/(beta_newmark*dt); double ma2=1.0/(beta_newmark*dt); double ma3=(1.0/(2.0*beta_newmark))-1.0; double ma4=(gamma_newmark/beta_newmark)-1.0; double ma5=dt*0.5*((gamma_newmark/beta_newmark)-2.0); for(ModelPart::NodeIterator i = model_part.NodesBegin() ; i != model_part.NodesEnd() ; ++i) { noalias(DeltaDisp) = (i)->FastGetSolutionStepValue(DISPLACEMENT) - (i)->FastGetSolutionStepValue(DISPLACEMENT,1); array_1d<double,3>& CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY,0); array_1d<double,3>& OldVelocity = (i)->FastGetSolutionStepValue(VELOCITY,1); array_1d<double,3>& CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION,0); array_1d<double,3>& OldAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION,1); UpdateVelocity(CurrentVelocity,DeltaDisp,OldVelocity,OldAcceleration, ma1, ma4, ma5); UpdateAcceleration(CurrentAcceleration,DeltaDisp,OldVelocity,OldAcceleration, ma0, ma2, ma3); } KRATOS_CATCH(""); } //////////////////////////////////////////////////////////////////////////////////////////////////////// inline void UpdateVelocity(array_1d<double, 3>& CurrentVelocity, const array_1d<double, 3>& DeltaDisp, const array_1d<double, 3>& OldVelocity, const array_1d<double, 3>& OldAcceleration, double& ma1, double& ma4, double & ma5) { noalias(CurrentVelocity) = ma1*DeltaDisp - ma4*OldVelocity - ma5*OldAcceleration; } //////////////////////////////////////////////////////////////////////////////////////////////////////// inline void UpdateAcceleration(array_1d<double, 3>& CurrentAcceleration, const array_1d<double, 3>& DeltaDisp, const array_1d<double, 3>& OldVelocity, const array_1d<double, 3>& OldAcceleration, double& ma0, double& ma2, double & ma3) { noalias(CurrentAcceleration) = ma0*DeltaDisp - ma2*OldVelocity - ma3*OldAcceleration; } void UpdatePressuresNew (TSystemMatrixType& mMconsistent, TSystemVectorType& mMdiagInv,ModelPart& r_model_part, double bulk_modulus, double density) { KRATOS_TRY //getting the dof position unsigned int dof_position = (r_model_part.NodesBegin())->GetDofPosition(DISPLACEMENT_X); // const double dt = r_model_part.GetProcessInfo()[DELTA_TIME]; //!!!! 
LATER ON - CHANGE THE WAY TO COMPUTE BULK MODULUS INSTEAD OF PASSING IT AS A PARAMETER //resetting the pressures to zero for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in) { in->FastGetSolutionStepValue(PRESSURE)=0.0; } //for pressure vectors const int size = TSparseSpace::Size(mMdiagInv); TSystemVectorType p_n(size); //TSystemMatrixType aux(size,size); //aux=ZeroMatrix(size,size); TSystemVectorType temp(size); TSystemVectorType history(size); //TSparseSpace::SetToZero(p_n1); TSparseSpace::SetToZero(p_n); TSparseSpace::SetToZero(history); //assuming that the bulk modulus is the same for all nodes in the model part //p_n is the history, d_a - change_of_nodal_area/current_nodal_area int i=0; for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in) { if( (in->GetValue(NEIGHBOUR_NODES)).size() != 0 )// && in->FastGetSolutionStepValue(IS_FLUID)==1.0) { i=in->GetDof(DISPLACEMENT_X,dof_position).EquationId()/TDim; p_n[i]=in->FastGetSolutionStepValue(PRESSURE,1); } } //KRATOS_WATCH(p_n) //history (multiplied by the consistent mass matrix) and then by the inverse lumped mass matrix TSparseSpace::Mult(mMconsistent, p_n, history); //KRATOS_WATCH(history) int aa=0; for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in) { if( (in->GetValue(NEIGHBOUR_NODES)).size() != 0)// && in->FastGetSolutionStepValue(IS_FLUID)==1.0) { aa=in->GetDof(DISPLACEMENT_X,dof_position).EquationId()/TDim; if (in->FastGetSolutionStepValue(IS_FLUID)==1.0) { //+temp[aa]/density in->FastGetSolutionStepValue(PRESSURE)=(mMdiagInv[aa]*history[aa])+bulk_modulus*density*(in->FastGetSolutionStepValue(NODAL_AREA) - in->FastGetSolutionStepValue(NODAL_AREA,1))/(in->FastGetSolutionStepValue(NODAL_AREA)); //this one is without mass matrix difference stab, just the laplacian //in->FastGetSolutionStepValue(PRESSURE)=p_n[aa]+temp[aa]/density+bulk_modulus*density*(in->FastGetSolutionStepValue(NODAL_AREA) - in->FastGetSolutionStepValue(NODAL_AREA,1))/(in->FastGetSolutionStepValue(NODAL_AREA)); } } } KRATOS_CATCH(""); } //this function updates pressure after the Dx is obtained at every step of N-R procedure void UpdatePressures ( TSystemMatrixType& mD, TSystemMatrixType& mMconsistent, TSystemVectorType& mMdiagInv,ModelPart& r_model_part, double bulk_modulus, double density) { KRATOS_TRY //getting the dof position // unsigned int dof_position = (r_model_part.NodesBegin())->GetDofPosition(DISPLACEMENT_X); // const double dt = r_model_part.GetProcessInfo()[DELTA_TIME]; //!!!! 
LATER ON - CHANGE THE WAY TO COMPUTE BULK MODULUS INSTEAD OF PASSING IT AS A PARAMETER //for pressure vectors const int size = TSparseSpace::Size(mMdiagInv); //for displacement vectors const int size_disp = TDim*TSparseSpace::Size(mMdiagInv); TSystemVectorType p_n(size); TSystemVectorType dp(size); TSystemVectorType p_n1(size); TSystemVectorType history(size); //TSystemVectorType temp1(size); //TSystemVectorType temp2(size); TSparseSpace::SetToZero(p_n); TSparseSpace::SetToZero(dp); TSparseSpace::SetToZero(p_n1); TSparseSpace::SetToZero(history); //TSparseSpace::SetToZero(temp1); //TSparseSpace::SetToZero(temp2); TSystemMatrixType aux(size,size); TSystemVectorType temp(size); TSystemVectorType displ(size_disp); /* TSystemMatrixType GlobLapl (size,size); TSystemMatrixType LocLapl (TDim+1,TDim+1); for (typename ElementsArrayType::iterator im=r_model_part.ElementsBegin(); im!=r_model_part.ElementsEnd(); ++im) { boost::numeric::ublas::bounded_matrix<double,TDim+1,TDim> DN_DX; array_1d<double,TDim+1> N; array_1d<unsigned int ,TDim+1> local_indices; Geometry< Node<3> >& geom = im->GetGeometry(); //calculating elemental values double Volume; GeometryUtils::CalculateGeometryData(geom, DN_DX, N, Volume); array_1d<double,3> ms_vel_gauss = ZeroVector(3); const array_1d<double,3>& fv0 = geom[0].FastGetSolutionStepValue(VELOCITY); const array_1d<double,3>& fv1 = geom[1].FastGetSolutionStepValue(VELOCITY); const array_1d<double,3>& fv2 = geom[2].FastGetSolutionStepValue(VELOCITY); array_1d<double,3> fv3 = ZeroVector(3); if (TDim==3) fv3 = geom[3].FastGetSolutionStepValue(VELOCITY); double nu = geom[0].FastGetSolutionStepValue(VISCOSITY)+ geom[1].FastGetSolutionStepValue(VISCOSITY) + geom[2].FastGetSolutionStepValue(VISCOSITY); double density = geom[0].FastGetSolutionStepValue(DENSITY)+ geom[1].FastGetSolutionStepValue(DENSITY) + geom[2].FastGetSolutionStepValue(DENSITY); ms_vel_gauss=fv0+fv1+fv2; if (TDim==2) { nu*=0.33333333333; density*=0.33333333333; ms_vel_gauss*=0.33333333333; } if (TDim==3) { ms_vel_gauss+=fv3; nu+=geom[3].FastGetSolutionStepValue(VISCOSITY); density+=geom[3].FastGetSolutionStepValue(DENSITY); ms_vel_gauss*=0.25; nu*=0.25; density*=0.25; } //finiding local indices //for(int ii = 0; ii<TDim+1; ii++) for(unsigned int ii = 0; ii<geom.size(); ii++) { local_indices[ii] = geom[ii].GetDof(DISPLACEMENT_X,dof_position).EquationId(); } //the structural elements should not contribute to the Laplacian int str_nr=0; for (unsigned int k = 0;k<geom.size();k++) { str_nr+=(unsigned int)(geom[k].FastGetSolutionStepValue(IS_STRUCTURE)); } int switch_var=0; //set to zero the entries of the str. 
elements if (str_nr==TDim+1) switch_var=0; else switch_var =1; //ms_vel_gauss[i] = msN[0]*(fv0[i]) + msN[1]*(fv1[i]) + msN[2]*(fv2[i]); //but with one integration N=0.333333333 double norm_u; double h; if (TDim==2) { ms_vel_gauss[0] = 0.33333333333333*(fv0[0]+fv1[0]+fv2[0]); ms_vel_gauss[1] = 0.33333333333333*(fv0[1]+fv1[1]+fv2[1]); ms_vel_gauss[2] = 0.0; //calculating parameter tau (saved internally to each element) h = sqrt(2.00*Volume); norm_u = ms_vel_gauss[0]*ms_vel_gauss[0] + ms_vel_gauss[1]*ms_vel_gauss[1]; norm_u = sqrt(norm_u); } if (TDim==3) { ms_vel_gauss[0] = 0.25*(fv0[0]+fv1[0]+fv2[0]+fv3[0]); ms_vel_gauss[1] = 0.25*(fv0[1]+fv1[1]+fv2[1]+fv3[1]); ms_vel_gauss[2] = 0.25*(fv0[2]+fv1[2]+fv2[2]+fv3[2]); //calculating parameter tau (saved internally to each element) h = sqrt(2.00*Volume); norm_u = ms_vel_gauss[0]*ms_vel_gauss[0] + ms_vel_gauss[1]*ms_vel_gauss[1] + ms_vel_gauss[2]*ms_vel_gauss[2]; norm_u = sqrt(norm_u); } //- 4.0/(bulk_modulus*h*h) //double tau = 1.00 / ( 4.00*nu/(h*h) - bulk_modulus*dt/h+2.00*norm_u/h); //double tau=(bulk_modulus)*dt*h/(norm_u+nu/h); //double tau=(bulk_modulus)*dt*dt;//h/(norm_u+*nu/h); //my last proposal double tau = (bulk_modulus)*dt*nu/(norm_u*norm_u+(nu/dt)); //Ric's proposal - doesnt work - checked with 2d-splash //double tau = (bulk_modulus)*dt*1.0/((1.0/dt)+(nu/h*h)); //SWITCHED OFF THE STABILIZATION! switch_var=0; noalias(LocLapl)=switch_var*prod(DN_DX,trans(DN_DX)); for(unsigned int row = 0; row<TDim+1; row++) { unsigned int row_index = local_indices[row] / (TDim); for(unsigned int col = 0; col<TDim+1; col++) { unsigned int col_index = local_indices[col] /(TDim); GlobLapl(row_index, col_index)+=tau*Volume*LocLapl(row,col); } } //end of the loop over elements } for (int i=0;i<mMdiagInv.size(); i++) { aux(i,i)=GlobLapl(i,i)*mMdiagInv(i); } */ //assuming that the bulk modulus is the same for all nodes in the model part // //additionally here we update densities, simply by implyimg: ro_0xV_0=ro_1xV_1 int i=0; for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in) { if( (in->GetValue(NEIGHBOUR_NODES)).size() != 0 ) { //in pn we save old pressures if (i<size) p_n[i]=in->FastGetSolutionStepValue(PRESSURE,1); i++; //here we update densities //if (in->FastGetSolutionStepValue(NODAL_AREA)!=0.0) // in->FastGetSolutionStepValue(DENSITY)=in->FastGetSolutionStepValue(DENSITY,1)*in->FastGetSolutionStepValue(NODAL_AREA,1)/in->FastGetSolutionStepValue(NODAL_AREA); } } //temp = prod(aux, p_n); //history (multiplied by the consistent mass matrix) TSparseSpace::Mult(mMconsistent, p_n, history); //now we compute the pressure increment //first we save in the p_n1 the current deltap = KDd //Dx is denoted by d // //we store displacements in one big vector for(typename DofsArrayType::iterator i_dof = BaseType::mDofSet.begin() ; i_dof != BaseType::mDofSet.end() ; ++i_dof) { displ[i_dof->EquationId()]=i_dof->GetSolutionStepValue()-i_dof->GetSolutionStepValue(1); } TSparseSpace::Mult(mD, displ, dp); //KRATOS_WATCH(bulk_modulus) dp*=(bulk_modulus*density); //now we add the history (multiplied by the consistent mass matrix) //adding: mMconsistent*p_n + KDdipsl //p_n1=(temp+dp); //and now we multiply the result with the inverse of the lumped mass matrix //we reutilize the auxilliary matrix temp for (int ii=0; ii<size; ii++) { //temp1[ii]=mMdiagInv[ii]*p_n1[ii]; p_n1[ii]=mMdiagInv[ii]*(history[ii]+dp[ii]); } //this is just to check //for (int ii=0; ii<size;ii++) //{ //temp2[ii]=(mMdiagInv[ii]*dp[ii])+p_n[ii]; //} 
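        //summary of the update computed above:
        //   p_(n+1) = Mlumped^-1 * ( Mconsistent * p_n + bulk_modulus * density * D * (d_(n+1) - d_n) )
        //where history = Mconsistent*p_n, dp = bulk_modulus*density*D*displ, and mMdiagInv stores the
        //inverse lumped mass matrix; the loop below simply writes p_n1 back to the nodal PRESSURE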
//resetting the pressures to zero // for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in) { if( (in->GetValue(NEIGHBOUR_NODES)).size() != 0 ) { in->FastGetSolutionStepValue(PRESSURE)=0.0; } } int aa=0; for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in) { if( (in->GetValue(NEIGHBOUR_NODES)).size() != 0 ) { //not to add the "lonely" nodes , that are not part of the model (e.g. structure walls) if (aa<size) in->FastGetSolutionStepValue(PRESSURE)=p_n1[aa];//+temp[aa]/density; //in->FastGetSolutionStepValue(PRESSURE)=temp2[aa]; aa++; } } //KRATOS_WATCH("PRESSURE UPDATE FUNCTION INSIDE BULDER AND SOLVER") /* for (typename NodesArrayType::iterator in=r_model_part.NodesBegin(); in!=r_model_part.NodesEnd(); ++in) { KRATOS_WATCH(in->FastGetSolutionStepValue(PRESSURE)); } */ KRATOS_CATCH(""); } //************************************************************************** //************************************************************************** /* void SystemSolve( const TSystemMatrixType& A, const TSystemMatrixType& D, const TSystemVectorType& mMass_inverse, const TSystemVectorType& mpreconditioner, TSystemVectorType& x, const TSystemVectorType& b ) { KRATOS_TRY const int size = TSparseSpaceType::Size(rX); unsigned int IterationsNumber = 0; TSystemVectorType r(size); TSystemVectorType q(size); PreconditionedMult(rA,rX,r); TSparseSpaceType::ScaleAndAdd(1.00, rB, -1.00, r); BaseType::mBNorm = TSparseSpaceType::TwoNorm(rB); VectorType p(r); VectorType q(size); double roh0 = TSparseSpaceType::Dot(r, r); double roh1 = roh0; double beta = 0; if(fabs(roh0) < 1.0e-30) //modification by Riccardo // if(roh0 == 0.00) return false; do { PreconditionedMult(rA,p,q); double pq = TSparseSpaceType::Dot(p,q); //if(pq == 0.00) if(fabs(pq) <= 1.0e-30) break; double alpha = roh0 / pq; TSparseSpaceType::ScaleAndAdd(alpha, p, 1.00, rX); TSparseSpaceType::ScaleAndAdd(-alpha, q, 1.00, r); roh1 = TSparseSpaceType::Dot(r,r); beta = (roh1 / roh0); TSparseSpaceType::ScaleAndAdd(1.00, r, beta, p); roh0 = roh1; BaseType::mResidualNorm = sqrt(roh1); BaseType::mIterationsNumber++; } while(BaseType::IterationNeeded() && (fabs(roh0) > 1.0e-30) KRATOS_CATCH(""); } */ /*@} */ /**@name Private Access */ /*@{ */ /*@} */ /**@name Private Inquiry */ /*@{ */ /*@} */ /**@name Un accessible methods */ /*@{ */ /*@} */ }; /* Class ResidualBasedEliminationDiscreteLaplacianBuilderAndSolver */ /*@} */ /**@name Type Definitions */ /*@{ */ /*@} */ } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUAL_BASED_ELIMINATION_QUASI_INCOMPRESSIBLE_BUILDER_AND_SOLVER defined */
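/*
 * Illustrative sketch (not part of the builder-and-solver class above): the helpers
 * CalculatePreconditionerDiagonalMatrix, calc_GMinvD_prod, calc_prod_precond_vec and
 * ConvergenceCheck are the building blocks of a diagonally (Jacobi) preconditioned
 * conjugate gradient on the pressure system, which the commented-out SystemSolve only
 * hints at. Below is a minimal standalone version of that iteration using plain C
 * arrays and a caller-supplied matrix-vector product; all names are hypothetical.
 */
#include <math.h>
#include <stddef.h>

/* y = S*x, where S is the (symmetric positive definite) system matrix */
typedef void (*spd_matvec)(const double *x, double *y, size_t n, void *ctx);

/* Returns the number of iterations performed, or -1 if not converged.
 * r, z, p, q are caller-supplied work arrays of length n. */
static int pcg_jacobi(spd_matvec S, void *ctx, const double *diag_inv,
                      const double *b, double *x, double *r, double *z,
                      double *p, double *q, size_t n, double tol, int max_it)
{
    /* r = b - S*x (x may start at zero) */
    S(x, r, n, ctx);
    double bnorm = 0.0;
    for (size_t i = 0; i < n; i++) { r[i] = b[i] - r[i]; bnorm += b[i] * b[i]; }
    bnorm = sqrt(bnorm);
    if (bnorm == 0.0) bnorm = 1.0;

    /* z = Minv*r ; p = z */
    double rz = 0.0;
    for (size_t i = 0; i < n; i++) { z[i] = diag_inv[i] * r[i]; p[i] = z[i]; rz += r[i] * z[i]; }

    for (int it = 1; it <= max_it; it++)
    {
        S(p, q, n, ctx);
        double pq = 0.0;
        for (size_t i = 0; i < n; i++) pq += p[i] * q[i];
        if (fabs(pq) < 1e-30) return -1;            /* breakdown */
        double alpha = rz / pq;

        double rnorm = 0.0;
        for (size_t i = 0; i < n; i++)
        {
            x[i] += alpha * p[i];
            r[i] -= alpha * q[i];
            rnorm += r[i] * r[i];
        }
        /* relative residual test, as in ConvergenceCheck above */
        if (sqrt(rnorm) / bnorm < tol) return it;

        double rz_new = 0.0;
        for (size_t i = 0; i < n; i++) { z[i] = diag_inv[i] * r[i]; rz_new += r[i] * z[i]; }
        double beta = rz_new / rz;
        rz = rz_new;
        for (size_t i = 0; i < n; i++) p[i] = z[i] + beta * p[i];
    }
    return -1;
}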
dynamic_module_load.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-aarch64-unknown-linux-gnu -ldl && %libomptarget-run-aarch64-unknown-linux-gnu %t.so 2>&1 | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-powerpc64-ibm-linux-gnu -ldl && %libomptarget-run-powerpc64-ibm-linux-gnu %t.so 2>&1 | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-powerpc64le-ibm-linux-gnu -ldl && %libomptarget-run-powerpc64le-ibm-linux-gnu %t.so 2>&1 | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-x86_64-pc-linux-gnu -ldl && %libomptarget-run-x86_64-pc-linux-gnu %t.so 2>&1 | %fcheck-x86_64-pc-linux-gnu
// RUN: %libomptarget-compile-nvptx64-nvidia-cuda -DSHARED -fPIC -shared -o %t.so && %clang %flags %s -o %t-nvptx64-nvidia-cuda -ldl && %libomptarget-run-nvptx64-nvidia-cuda %t.so 2>&1 | %fcheck-nvptx64-nvidia-cuda

// [BOLT] This test takes too long; exclude it.
// UNSUPPORTED: clang-11

#ifdef SHARED
#include <stdio.h>
int foo() {
#pragma omp target
  ;
  printf("%s\n", "DONE.");
  return 0;
}
#else
#include <dlfcn.h>
#include <stdio.h>
int main(int argc, char **argv) {
  void *Handle = dlopen(argv[1], RTLD_NOW);
  int (*Foo)(void);
  if (Handle == NULL) {
    printf("dlopen() failed: %s\n", dlerror());
    return 1;
  }
  Foo = (int (*)(void)) dlsym(Handle, "foo");
  // check the result of dlsym(), not the handle again
  if (Foo == NULL) {
    printf("dlsym() failed: %s\n", dlerror());
    return 1;
  }
  // CHECK: DONE.
  // CHECK-NOT: {{abort|fault}}
  return Foo();
}
#endif

GB_emult_01_template.c
//------------------------------------------------------------------------------ // GB_emult_01_template: C=A.*B, C<M or !M>=A.*B when C is sparse/hyper //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Computes C=A.*B, C<M>=A.*B, or C<!M>=A.*B when C is sparse or hypersparse: // phase1: does not compute C itself, but just counts the # of entries in each // vector of C. Fine tasks compute the # of entries in their slice of a // single vector of C, and the results are cumsum'd. // phase2: computes C, using the counts computed by phase1. // No input matrix can be jumbled, and C is constructed as unjumbled. // The following cases are handled: // ------------------------------------------ // C = A .* B // ------------------------------------------ // sparse . sparse sparse (method: 01) // ------------------------------------------ // C <M>= A .* B // ------------------------------------------ // sparse sparse sparse sparse (method: 01) // sparse bitmap sparse sparse (method: 01) // sparse full sparse sparse (method: 01) // sparse sparse sparse bitmap (04a or 02a) // sparse sparse sparse full (04a or 02a) // sparse sparse bitmap sparse (04b or 02b) // sparse sparse full sparse (04b or 02b) // ------------------------------------------ // C <!M>= A .* B // ------------------------------------------ // sparse sparse sparse sparse (01: M later) // sparse bitmap sparse sparse (method: 01) // sparse full sparse sparse (method: 01) // The 04a and 04b methods are not yet implemented. This method handles // those cases. Methods 02a and 02b can be used as well, but only if M is // applied later. See GB_emult_sparsity for this decision. { int taskid ; #pragma omp parallel for num_threads(C_nthreads) schedule(dynamic,1) for (taskid = 0 ; taskid < C_ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- int64_t kfirst = TaskList [taskid].kfirst ; int64_t klast = TaskList [taskid].klast ; bool fine_task = (klast == -1) ; int64_t len ; if (fine_task) { // a fine task operates on a slice of a single vector klast = kfirst ; len = TaskList [taskid].len ; } else { // a coarse task operates on one or more whole vectors len = vlen ; } //---------------------------------------------------------------------- // compute all vectors in this task //---------------------------------------------------------------------- for (int64_t k = kfirst ; k <= klast ; k++) { //------------------------------------------------------------------ // get j, the kth vector of C //------------------------------------------------------------------ int64_t j = GBH (Ch, k) ; #if defined ( GB_PHASE_1_OF_2 ) int64_t cjnz = 0 ; #else int64_t pC, pC_end ; if (fine_task) { // A fine task computes a slice of C(:,j) pC = TaskList [taskid ].pC ; pC_end = TaskList [taskid+1].pC ; ASSERT (Cp [k] <= pC && pC <= pC_end && pC_end <= Cp [k+1]) ; } else { // The vectors of C are never sliced for a coarse task. 
pC = Cp [k] ; pC_end = Cp [k+1] ; } int64_t cjnz = pC_end - pC ; if (cjnz == 0) continue ; #endif //------------------------------------------------------------------ // get A(:,j) //------------------------------------------------------------------ int64_t pA = -1, pA_end = -1 ; if (fine_task) { // A fine task operates on Ai,Ax [pA...pA_end-1], which is // a subset of the vector A(:,j) pA = TaskList [taskid].pA ; pA_end = TaskList [taskid].pA_end ; } else { // A coarse task operates on the entire vector A (:,j) int64_t kA = (Ch == Ah) ? k : ((C_to_A == NULL) ? j : C_to_A [k]) ; if (kA >= 0) { pA = GBP (Ap, kA, vlen) ; pA_end = GBP (Ap, kA+1, vlen) ; } } int64_t ajnz = pA_end - pA ; // nnz in A(:,j) for this slice int64_t pA_start = pA ; bool adense = (ajnz == len) ; // get the first and last indices in A(:,j) for this vector int64_t iA_first = -1 ; if (ajnz > 0) { iA_first = GBI (Ai, pA, vlen) ; } #if defined ( GB_PHASE_1_OF_2 ) || defined ( GB_DEBUG ) int64_t iA_last = -1 ; if (ajnz > 0) { iA_last = GBI (Ai, pA_end-1, vlen) ; } #endif //------------------------------------------------------------------ // get B(:,j) //------------------------------------------------------------------ int64_t pB = -1, pB_end = -1 ; if (fine_task) { // A fine task operates on Bi,Bx [pB...pB_end-1], which is // a subset of the vector B(:,j) pB = TaskList [taskid].pB ; pB_end = TaskList [taskid].pB_end ; } else { // A coarse task operates on the entire vector B (:,j) int64_t kB = (Ch == Bh) ? k : ((C_to_B == NULL) ? j : C_to_B [k]) ; if (kB >= 0) { pB = GBP (Bp, kB, vlen) ; pB_end = GBP (Bp, kB+1, vlen) ; } } int64_t bjnz = pB_end - pB ; // nnz in B(:,j) for this slice int64_t pB_start = pB ; bool bdense = (bjnz == len) ; // get the first and last indices in B(:,j) for this vector int64_t iB_first = -1 ; if (bjnz > 0) { iB_first = GBI (Bi, pB, vlen) ; } #if defined ( GB_PHASE_1_OF_2 ) || defined ( GB_DEBUG ) int64_t iB_last = -1 ; if (bjnz > 0) { iB_last = GBI (Bi, pB_end-1, vlen) ; } #endif //------------------------------------------------------------------ // get M(:,j) if M is sparse or hypersparse //------------------------------------------------------------------ int64_t pM = -1 ; int64_t pM_end = -1 ; if (M_is_sparse_or_hyper) { if (fine_task) { // A fine task operates on Mi,Mx [pM...pM_end-1], which is // a subset of the vector M(:,j) pM = TaskList [taskid].pM ; pM_end = TaskList [taskid].pM_end ; } else { int64_t kM = -1 ; if (Ch == Mh) { // Ch is the same as Mh (a shallow copy), or both NULL kM = k ; } else { kM = (C_to_M == NULL) ? 
j : C_to_M [k] ; } if (kM >= 0) { pM = GBP (Mp, kM, vlen) ; pM_end = GBP (Mp, kM+1, vlen) ; } } } //------------------------------------------------------------------ // C(:,j)<optional mask> = A (:,j) .* B (:,j) or subvector //------------------------------------------------------------------ #if defined ( GB_PHASE_1_OF_2 ) if (ajnz == 0 || bjnz == 0) { //-------------------------------------------------------------- // A(:,j) and/or B(:,j) are empty //-------------------------------------------------------------- ; } else if (iA_last < iB_first || iB_last < iA_first) { //-------------------------------------------------------------- // intersection of A(:,j) and B(:,j) is empty //-------------------------------------------------------------- // the last entry of A(:,j) comes before the first entry // of B(:,j), or visa versa ; } else #endif if (M == NULL) { //-------------------------------------------------------------- // C = A.*B //-------------------------------------------------------------- // ------------------------------------------ // C = A .* B // ------------------------------------------ // sparse . sparse sparse (method: 01) // sparse sparse sparse sparse (01 M later) // both A and B are sparse/hyper ASSERT (A_is_sparse || A_is_hyper) ; ASSERT (B_is_sparse || B_is_hyper) ; if (ajnz > 32 * bjnz) { //---------------------------------------------------------- // A(:,j) is much denser than B(:,j) //---------------------------------------------------------- for ( ; pB < pB_end ; pB++) { int64_t i = Bi [pB] ; // find i in A(:,j) int64_t pright = pA_end - 1 ; bool found ; GB_BINARY_SEARCH (i, Ai, pA, pright, found) ; if (found) { // C (i,j) = A (i,j) .* B (i,j) #if defined ( GB_PHASE_1_OF_2 ) cjnz++ ; #else ASSERT (pC < pC_end) ; Ci [pC] = i ; #ifndef GB_ISO_EMULT GB_GETA (aij, Ax, pA, A_iso) ; GB_GETB (bij, Bx, pB, B_iso) ; GB_BINOP (GB_CX (pC), aij, bij, i, j) ; #endif pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif } else if (bjnz > 32 * ajnz) { //---------------------------------------------------------- // B(:,j) is much denser than A(:,j) //---------------------------------------------------------- for ( ; pA < pA_end ; pA++) { int64_t i = Ai [pA] ; // find i in B(:,j) int64_t pright = pB_end - 1 ; bool found ; GB_BINARY_SEARCH (i, Bi, pB, pright, found) ; if (found) { // C (i,j) = A (i,j) .* B (i,j) #if defined ( GB_PHASE_1_OF_2 ) cjnz++ ; #else ASSERT (pC < pC_end) ; Ci [pC] = i ; #ifndef GB_ISO_EMULT GB_GETA (aij, Ax, pA, A_iso) ; GB_GETB (bij, Bx, pB, B_iso) ; GB_BINOP (GB_CX (pC), aij, bij, i, j) ; #endif pC++ ; #endif } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif } else { //---------------------------------------------------------- // A(:,j) and B(:,j) about the sparsity //---------------------------------------------------------- // linear-time scan of A(:,j) and B(:,j) while (pA < pA_end && pB < pB_end) { int64_t iA = Ai [pA] ; int64_t iB = Bi [pB] ; if (iA < iB) { // A(i,j) exists but not B(i,j) pA++ ; } else if (iB < iA) { // B(i,j) exists but not A(i,j) pB++ ; } else { // both A(i,j) and B(i,j) exist // C (i,j) = A (i,j) .* B (i,j) #if defined ( GB_PHASE_1_OF_2 ) cjnz++ ; #else ASSERT (pC < pC_end) ; Ci [pC] = iB ; #ifndef GB_ISO_EMULT GB_GETA (aij, Ax, pA, A_iso) ; GB_GETB (bij, Bx, pB, B_iso) ; GB_BINOP (GB_CX (pC), aij, bij, iB, j) ; #endif pC++ ; #endif pA++ ; pB++ ; } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif } } else if (M_is_sparse_or_hyper) { 
//-------------------------------------------------------------- // C and M are sparse or hypersparse //-------------------------------------------------------------- // ------------------------------------------ // C <M>= A .* B // ------------------------------------------ // sparse sparse sparse sparse (method: 01) // sparse sparse sparse bitmap (04a or 02a) // sparse sparse sparse full (04a or 02a) // sparse sparse bitmap sparse (04b or 02b) // sparse sparse full sparse (04b or 02b) // Method 04 is not yet implemented; using this method // (GB_emult_01) instead. // ether A or B are sparse/hyper ASSERT (A_is_sparse || A_is_hyper || B_is_sparse || B_is_hyper); for ( ; pM < pM_end ; pM++) { //---------------------------------------------------------- // get M(i,j) for A(i,j) .* B (i,j) //---------------------------------------------------------- int64_t i = GBI (Mi, pM, vlen) ; bool mij = GB_mcast (Mx, pM, msize) ; if (!mij) continue ; //---------------------------------------------------------- // get A(i,j) //---------------------------------------------------------- bool afound ; if (adense) { // A(:,j) is dense, bitmap, or full; use quick lookup pA = pA_start + i - iA_first ; afound = GBB (Ab, pA) ; } else { // A(:,j) is sparse; use binary search for A(i,j) int64_t apright = pA_end - 1 ; GB_BINARY_SEARCH (i, Ai, pA, apright, afound) ; } if (!afound) continue ; ASSERT (GBI (Ai, pA, vlen) == i) ; //---------------------------------------------------------- // get B(i,j) //---------------------------------------------------------- bool bfound ; if (bdense) { // B(:,j) is dense; use direct lookup for B(i,j) pB = pB_start + i - iB_first ; bfound = GBB (Bb, pB) ; } else { // B(:,j) is sparse; use binary search for B(i,j) int64_t bpright = pB_end - 1 ; GB_BINARY_SEARCH (i, Bi, pB, bpright, bfound) ; } if (!bfound) continue ; ASSERT (GBI (Bi, pB, vlen) == i) ; //---------------------------------------------------------- // C(i,j) = A(i,j) .* B(i,j) //---------------------------------------------------------- // C (i,j) = A (i,j) .* B (i,j) #if defined ( GB_PHASE_1_OF_2 ) cjnz++ ; #else Ci [pC] = i ; #ifndef GB_ISO_EMULT GB_GETA (aij, Ax, pA, A_iso) ; GB_GETB (bij, Bx, pB, B_iso) ; GB_BINOP (GB_CX (pC), aij, bij, i, j) ; #endif pC++ ; #endif } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif } else { //-------------------------------------------------------------- // M is bitmap or full, for either C<M>=A.*B or C<!M>=A.*B //-------------------------------------------------------------- // ------------------------------------------ // C <M>= A .* B // ------------------------------------------ // sparse bitmap sparse sparse (method: 01) // sparse full sparse sparse (method: 01) // ------------------------------------------ // C <!M>= A .* B // ------------------------------------------ // sparse bitmap sparse sparse (method: 01) // sparse full sparse sparse (method: 01) // GB_GET_MIJ: get M(i,j) where M is bitmap or full #undef GB_GET_MIJ #define GB_GET_MIJ(i) \ int64_t pM = pM_start + i ; \ bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ; \ if (Mask_comp) mij = !mij ; // both A and B are sparse/hyper ASSERT (A_is_sparse || A_is_hyper) ; ASSERT (B_is_sparse || B_is_hyper) ; int64_t pM_start = j * vlen ; if (ajnz > 32 * bjnz) { //---------------------------------------------------------- // A(:,j) much denser than B(:,j), M bitmap/full //---------------------------------------------------------- for ( ; pB < pB_end ; pB++) { int64_t i = Bi [pB] ; GB_GET_MIJ (i) ; if (mij) { // 
find i in A(:,j) int64_t pright = pA_end - 1 ; bool found ; GB_BINARY_SEARCH (i, Ai, pA, pright, found) ; if (found) { // C (i,j) = A (i,j) .* B (i,j) #if defined ( GB_PHASE_1_OF_2 ) cjnz++ ; #else ASSERT (pC < pC_end) ; Ci [pC] = i ; #ifndef GB_ISO_EMULT GB_GETA (aij, Ax, pA, A_iso) ; GB_GETB (bij, Bx, pB, B_iso) ; GB_BINOP (GB_CX (pC), aij, bij, i, j) ; #endif pC++ ; #endif } } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif } else if (bjnz > 32 * ajnz) { //---------------------------------------------------------- // B(:,j) much denser than A(:,j), M bitmap/full //---------------------------------------------------------- for ( ; pA < pA_end ; pA++) { int64_t i = Ai [pA] ; GB_GET_MIJ (i) ; if (mij) { // find i in B(:,j) int64_t pright = pB_end - 1 ; bool found ; GB_BINARY_SEARCH (i, Bi, pB, pright, found) ; if (found) { // C (i,j) = A (i,j) .* B (i,j) #if defined ( GB_PHASE_1_OF_2 ) cjnz++ ; #else ASSERT (pC < pC_end) ; Ci [pC] = i ; #ifndef GB_ISO_EMULT GB_GETA (aij, Ax, pA, A_iso) ; GB_GETB (bij, Bx, pB, B_iso) ; GB_BINOP (GB_CX (pC), aij, bij, i, j) ; #endif pC++ ; #endif } } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif } else { //---------------------------------------------------------- // A(:,j) and B(:,j) about the same, M bitmap/full //---------------------------------------------------------- // linear-time scan of A(:,j) and B(:,j) while (pA < pA_end && pB < pB_end) { int64_t iA = Ai [pA] ; int64_t iB = Bi [pB] ; if (iA < iB) { // A(i,j) exists but not B(i,j) pA++ ; } else if (iB < iA) { // B(i,j) exists but not A(i,j) pB++ ; } else { // both A(i,j) and B(i,j) exist int64_t i = iA ; GB_GET_MIJ (i) ; if (mij) { // C (i,j) = A (i,j) .* B (i,j) #if defined ( GB_PHASE_1_OF_2 ) cjnz++ ; #else ASSERT (pC < pC_end) ; Ci [pC] = i ; #ifndef GB_ISO_EMULT GB_GETA (aij, Ax, pA, A_iso) ; GB_GETB (bij, Bx, pB, B_iso) ; GB_BINOP (GB_CX (pC), aij, bij, iB, j) ; #endif pC++ ; #endif } pA++ ; pB++ ; } } #if defined ( GB_PHASE_2_OF_2 ) ASSERT (pC == pC_end) ; #endif } } //------------------------------------------------------------------ // final count of nnz (C (:,j)) //------------------------------------------------------------------ #if defined ( GB_PHASE_1_OF_2 ) if (fine_task) { TaskList [taskid].pC = cjnz ; } else { Cp [k] = cjnz ; } #endif } } }
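/*
 * Illustrative sketch (separate from the template above): when intersecting the
 * index lists of A(:,j) and B(:,j), the kernel switches from a linear merge to
 * repeated binary searches once one list is more than 32x longer than the other.
 * The standalone routine below (intersect_count, a hypothetical name) shows the
 * same decision on two sorted index arrays; it only counts the matches, which
 * corresponds to phase 1 of the template.
 */
#include <stdbool.h>
#include <stdint.h>

static int64_t intersect_count (const int64_t *Ai, int64_t anz,
                                const int64_t *Bi, int64_t bnz)
{
    int64_t count = 0 ;
    if (anz > 32 * bnz || bnz > 32 * anz)
    {
        /* one list is much denser: search for each entry of the shorter list
         * in the longer list by binary search */
        const int64_t *S = (anz < bnz) ? Ai : Bi ;      /* shorter list */
        const int64_t *L = (anz < bnz) ? Bi : Ai ;      /* longer list  */
        int64_t snz = (anz < bnz) ? anz : bnz ;
        int64_t lnz = (anz < bnz) ? bnz : anz ;
        for (int64_t k = 0 ; k < snz ; k++)
        {
            int64_t i = S [k] ;
            int64_t lo = 0, hi = lnz - 1 ;
            bool found = false ;
            while (lo <= hi)
            {
                int64_t mid = lo + (hi - lo) / 2 ;
                if      (L [mid] < i) lo = mid + 1 ;
                else if (L [mid] > i) hi = mid - 1 ;
                else { found = true ; break ; }
            }
            if (found) count++ ;
        }
    }
    else
    {
        /* the lists have similar lengths: linear-time merge */
        int64_t pA = 0, pB = 0 ;
        while (pA < anz && pB < bnz)
        {
            if      (Ai [pA] < Bi [pB]) pA++ ;
            else if (Bi [pB] < Ai [pA]) pB++ ;
            else { count++ ; pA++ ; pB++ ; }
        }
    }
    return (count) ;
}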
convert_coo_x_csr.c
#include <alphasparse/opt.h>
#include <memory.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include "alphasparse/format.h"
#include "alphasparse/util.h"
#ifdef __DCU__
#include "alphasparse/util/qsort_csr_struct.h"
#endif

// convert a CSR matrix (4-array variant: rows_start/rows_end) into COO format
alphasparse_status_t ONAME(const ALPHA_SPMAT_CSR *source, ALPHA_SPMAT_COO **dest)
{
    ALPHA_SPMAT_COO *mat = alpha_malloc(sizeof(ALPHA_SPMAT_COO));
    *dest = mat;
    ALPHA_INT m = source->rows;
    ALPHA_INT n = source->cols;
    ALPHA_INT nnz = source->rows_end[m - 1];
    ALPHA_INT num_threads = alpha_get_thread_num();
    mat->rows = m;
    mat->cols = n;
    ALPHA_INT *rows_indx = alpha_memalign((uint64_t)(nnz) * sizeof(ALPHA_INT), DEFAULT_ALIGNMENT);
    ALPHA_INT *cols_indx = alpha_memalign((uint64_t)(nnz) * sizeof(ALPHA_INT), DEFAULT_ALIGNMENT);
    mat->values = alpha_memalign((uint64_t)nnz * sizeof(ALPHA_Number), DEFAULT_ALIGNMENT);
    mat->row_indx = rows_indx;
    mat->col_indx = cols_indx;
    // expand the CSR row pointers into one explicit row index per nonzero
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT r = 0; r < m; r++)
    {
        for (ALPHA_INT ai = source->rows_start[r]; ai < source->rows_end[r]; ai++)
        {
            rows_indx[ai] = r;
        }
    }
    // column indices and values are identical in both formats
    memcpy(cols_indx, source->col_indx, (uint64_t)sizeof(ALPHA_INT) * nnz);
    memcpy(mat->values, source->values, (uint64_t)sizeof(ALPHA_Number) * nnz);
    mat->nnz = nnz;
#ifdef __DCU__
    mat->ordered = source->ordered;
    coo_order(mat);
#endif
    mat->d_rows_indx = NULL;
    mat->d_cols_indx = NULL;
    mat->d_values = NULL;
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
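/*
 * Illustrative sketch (separate from the routine above): the CSR->COO conversion
 * only has to expand the row-pointer pairs (rows_start, rows_end) into one explicit
 * row index per nonzero; column indices and values are copied unchanged.  A plain-C
 * demo of that expansion, using the 3x3 matrix [[1,0,2],[0,0,3],[4,5,0]] stored in
 * the 4-array CSR variant, is:
 */
#include <stdio.h>

int main (void)
{
    int m = 3, nnz = 5 ;
    int rows_start [3] = { 0, 2, 3 } ;      /* first entry of each row      */
    int rows_end   [3] = { 2, 3, 5 } ;      /* one past the last entry      */
    int col_indx   [5] = { 0, 2, 2, 0, 1 } ;
    double values  [5] = { 1, 2, 3, 4, 5 } ;

    int row_indx [5] ;                      /* COO row indices to be filled */
    for (int r = 0 ; r < m ; r++)
        for (int ai = rows_start [r] ; ai < rows_end [r] ; ai++)
            row_indx [ai] = r ;

    for (int k = 0 ; k < nnz ; k++)         /* print the COO triplets       */
        printf ("(%d,%d) = %g\n", row_indx [k], col_indx [k], values [k]) ;
    return 0 ;
}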
GB_unop__atan_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__atan_fp64_fp64 // op(A') function: GB_unop_tran__atan_fp64_fp64 // C type: double // A type: double // cast: double cij = aij // unaryop: cij = atan (aij) #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = atan (x) ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = atan (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ATAN || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__atan_fp64_fp64 ( double *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = atan (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = aij ; Cx [p] = atan (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__atan_fp64_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
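/*
 * Illustrative usage note (not part of the generated file above): kernels such as
 * GB_unop_apply__atan_fp64_fp64 are reached through GrB_apply with the matching
 * built-in unary operator.  Assuming a double-precision matrix A already exists,
 * something like the following would compute C = atan(A) elementwise; nrows, ncols
 * and A are assumptions here, and GxB_ATAN_FP64 is the operator whose name matches
 * the GxB_NO_ATAN / GxB_NO_FP64 controls used above.
 *
 *     GrB_Matrix C = NULL ;
 *     GrB_Matrix_new (&C, GrB_FP64, nrows, ncols) ;
 *     GrB_Matrix_apply (C, NULL, NULL, GxB_ATAN_FP64, A, NULL) ;
 */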
GB_unaryop__abs_int8_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int8_uint32 // op(A') function: GB_tran__abs_int8_uint32 // C type: int8_t // A type: uint32_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ uint32_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int8_t z = (int8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int8_uint32 ( int8_t *restrict Cx, const uint32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int8_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_Global.c
//------------------------------------------------------------------------------ // GB_Global: global values in GraphBLAS //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // All Global storage is declared, initialized, and accessed here. The // contents of the GB_Global struct are only accessible to functions in this // file. Global storage is used to keep track of the GraphBLAS mode (blocking // or non-blocking), for pointers to malloc/calloc/realloc/free functions, // global matrix options, and other settings. #include "GB_atomics.h" //------------------------------------------------------------------------------ // Global storage: for all threads in a user application that uses GraphBLAS //------------------------------------------------------------------------------ typedef struct { void *queue_head ; // TODO in 4.0: delete //-------------------------------------------------------------------------- // blocking/non-blocking mode, set by GrB_init //-------------------------------------------------------------------------- GrB_Mode mode ; // GrB_NONBLOCKING or GrB_BLOCKING bool GrB_init_called ; // true if GrB_init already called //-------------------------------------------------------------------------- // threading and MKL control //-------------------------------------------------------------------------- bool use_mkl ; // control usage of Intel MKL int nthreads_max ; // max number of threads to use double chunk ; // chunk size for determining # threads to use //-------------------------------------------------------------------------- // hypersparsity and CSR/CSC format control //-------------------------------------------------------------------------- double hyper_ratio ; // default hyper_ratio for new matrices bool is_csc ; // default CSR/CSC format for new matrices //-------------------------------------------------------------------------- // abort function: only used for debugging //-------------------------------------------------------------------------- void (* abort_function ) (void) ; //-------------------------------------------------------------------------- // malloc/calloc/realloc/free: memory management functions //-------------------------------------------------------------------------- // All threads must use the same malloc/calloc/realloc/free functions. // They default to the ANSI C11 functions, but can be defined by GxB_init. void * (* malloc_function ) (size_t) ; void * (* calloc_function ) (size_t, size_t) ; void * (* realloc_function ) (void *, size_t) ; void (* free_function ) (void *) ; bool malloc_is_thread_safe ; // default is true //-------------------------------------------------------------------------- // memory usage tracking: for testing and debugging only //-------------------------------------------------------------------------- // malloc_tracking: default is false. There is no user-accessible API for // setting this to true. If true, the following statistics are computed. // If false, all of the following are unused. // nmalloc: To aid in searching for memory leaks, GraphBLAS keeps track of // the number of blocks of allocated that have not yet been freed. The // count starts at zero. 
GB_malloc_memory and GB_calloc_memory increment // this count, and free (of a non-NULL pointer) decrements it. realloc // increments the count it if is allocating a new block, but it does this // by calling GB_malloc_memory. // malloc_debug: this is used for testing only (GraphBLAS/Tcov). If true, // then use malloc_debug_count for testing memory allocation and // out-of-memory conditions. If malloc_debug_count > 0, the value is // decremented after each allocation of memory. If malloc_debug_count <= // 0, the GB_*_memory routines pretend to fail; returning NULL and not // allocating anything. bool malloc_tracking ; // true if allocations are being tracked int64_t nmalloc ; // number of blocks allocated but not freed bool malloc_debug ; // if true, test memory handling int64_t malloc_debug_count ; // for testing memory handling //-------------------------------------------------------------------------- // for testing and development //-------------------------------------------------------------------------- int64_t hack ; // ad hoc setting (for draft versions only) bool burble ; // controls GBBURBLE output //-------------------------------------------------------------------------- // for MATLAB interface only //-------------------------------------------------------------------------- bool print_one_based ; // if true, print 1-based indices //-------------------------------------------------------------------------- // CUDA (DRAFT: in progress) //-------------------------------------------------------------------------- int gpu_count ; // # of GPUs in the system GrB_Desc_Value gpu_control ; // always, never, or default double gpu_chunk ; // min problem size for using a GPU // properties of each GPU: GB_cuda_device gpu_properties [GB_CUDA_MAX_GPUS] ; } GB_Global_struct ; GB_PUBLIC GB_Global_struct GB_Global ; GB_Global_struct GB_Global = { .queue_head = NULL, // TODO in 4.0: delete // GraphBLAS mode .mode = GrB_NONBLOCKING, // default is nonblocking // initialization flag .GrB_init_called = false, // GrB_init has not yet been called // Intel MKL control (DRAFT: in progress) .use_mkl = false, // if true, exploit the Intel MKL // max number of threads and chunk size .nthreads_max = 1, .chunk = GB_CHUNK_DEFAULT, // default format .hyper_ratio = GB_HYPER_DEFAULT, .is_csc = (GB_FORMAT_DEFAULT != GxB_BY_ROW), // default is GxB_BY_ROW // abort function for debugging only .abort_function = abort, // malloc/calloc/realloc/free functions: default to ANSI C11 functions .malloc_function = malloc, .calloc_function = calloc, .realloc_function = realloc, .free_function = free, .malloc_is_thread_safe = true, // malloc tracking, for testing, statistics, and debugging only .malloc_tracking = false, .nmalloc = 0, // memory block counter .malloc_debug = false, // do not test memory handling .malloc_debug_count = 0, // counter for testing memory handling // for testing and development only .hack = 0, // diagnostics .burble = false, // for MATLAB interface only .print_one_based = false, // if true, print 1-based indices // CUDA environment (DRAFT: in progress) .gpu_count = 0, // # of GPUs in the system .gpu_control = GxB_DEFAULT, // always, never, or default .gpu_chunk = GB_GPU_CHUNK_DEFAULT // min problem size for using a GPU } ; //============================================================================== // GB_Global access functions //============================================================================== // TODO in 4.0: delete: GB_PUBLIC void GB_Global_queue_head_set (void *p) { 
GB_Global.queue_head = p ; } GB_PUBLIC void *GB_Global_queue_head_get (void) { return (GB_Global.queue_head) ; } //------------------------------------------------------------------------------ // mode //------------------------------------------------------------------------------ void GB_Global_mode_set (GrB_Mode mode) { GB_Global.mode = mode ; } GrB_Mode GB_Global_mode_get (void) { return (GB_Global.mode) ; } //------------------------------------------------------------------------------ // GrB_init_called //------------------------------------------------------------------------------ GB_PUBLIC // accessed by the MATLAB interface only void GB_Global_GrB_init_called_set (bool GrB_init_called) { GB_Global.GrB_init_called = GrB_init_called ; } GB_PUBLIC // accessed by the MATLAB interface only bool GB_Global_GrB_init_called_get (void) { return (GB_Global.GrB_init_called) ; } //------------------------------------------------------------------------------ // nthreads_max //------------------------------------------------------------------------------ GB_PUBLIC // accessed by the MATLAB interface only void GB_Global_nthreads_max_set (int nthreads_max) { GB_Global.nthreads_max = GB_IMAX (nthreads_max, 1) ; } GB_PUBLIC // accessed by the MATLAB interface only int GB_Global_nthreads_max_get (void) { return (GB_Global.nthreads_max) ; } //------------------------------------------------------------------------------ // OpenMP max_threads //------------------------------------------------------------------------------ GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only int GB_Global_omp_get_max_threads (void) { return (GB_OPENMP_MAX_THREADS) ; } //------------------------------------------------------------------------------ // chunk //------------------------------------------------------------------------------ GB_PUBLIC // accessed by the MATLAB interface only void GB_Global_chunk_set (double chunk) { if (chunk <= GxB_DEFAULT) chunk = GB_CHUNK_DEFAULT ; GB_Global.chunk = fmax (chunk, 1) ; } GB_PUBLIC // accessed by the MATLAB interface only double GB_Global_chunk_get (void) { return (GB_Global.chunk) ; } //------------------------------------------------------------------------------ // hyper_ratio //------------------------------------------------------------------------------ void GB_Global_hyper_ratio_set (double hyper_ratio) { GB_Global.hyper_ratio = hyper_ratio ; } double GB_Global_hyper_ratio_get (void) { return (GB_Global.hyper_ratio) ; } //------------------------------------------------------------------------------ // use_mkl //------------------------------------------------------------------------------ GB_PUBLIC // accessed by the MATLAB interface only void GB_Global_use_mkl_set (bool use_mkl) { GB_Global.use_mkl = use_mkl ; } GB_PUBLIC // accessed by the MATLAB interface only bool GB_Global_use_mkl_get (void) { return (GB_Global.use_mkl) ; } //------------------------------------------------------------------------------ // is_csc //------------------------------------------------------------------------------ void GB_Global_is_csc_set (bool is_csc) { GB_Global.is_csc = is_csc ; } bool GB_Global_is_csc_get (void) { return (GB_Global.is_csc) ; } //------------------------------------------------------------------------------ // abort_function //------------------------------------------------------------------------------ GB_PUBLIC // accessed by the MATLAB interface only void GB_Global_abort_function_set (void (* abort_function) (void)) { GB_Global.abort_function = 
abort_function ; } GB_PUBLIC // accessed by the MATLAB interface only void GB_Global_abort_function (void) { GB_Global.abort_function ( ) ; } //------------------------------------------------------------------------------ // malloc_function //------------------------------------------------------------------------------ void GB_Global_malloc_function_set (void * (* malloc_function) (size_t)) { GB_Global.malloc_function = malloc_function ; } void * GB_Global_malloc_function (size_t size) { bool ok = true ; void *p = NULL ; if (GB_Global.malloc_is_thread_safe) { p = GB_Global.malloc_function (size) ; } else { #pragma omp critical(GB_malloc_protection) { p = GB_Global.malloc_function (size) ; } } return (ok ? p : NULL) ; } //------------------------------------------------------------------------------ // calloc_function //------------------------------------------------------------------------------ void GB_Global_calloc_function_set (void * (* calloc_function) (size_t, size_t)) { GB_Global.calloc_function = calloc_function ; } void * GB_Global_calloc_function (size_t count, size_t size) { bool ok = true ; void *p = NULL ; if (GB_Global.malloc_is_thread_safe) { p = GB_Global.calloc_function (count, size) ; } else { #pragma omp critical(GB_malloc_protection) { p = GB_Global.calloc_function (count, size) ; } } return (ok ? p : NULL) ; } //------------------------------------------------------------------------------ // realloc_function //------------------------------------------------------------------------------ void GB_Global_realloc_function_set ( void * (* realloc_function) (void *, size_t) ) { GB_Global.realloc_function = realloc_function ; } bool GB_Global_have_realloc_function (void) { return (GB_Global.realloc_function != NULL) ; } void * GB_Global_realloc_function (void *p, size_t size) { bool ok = true ; void *pnew = NULL ; if (GB_Global.malloc_is_thread_safe) { pnew = GB_Global.realloc_function (p, size) ; } else { #pragma omp critical(GB_malloc_protection) { pnew = GB_Global.realloc_function (p, size) ; } } return (ok ? 
pnew : NULL) ; } //------------------------------------------------------------------------------ // free_function //------------------------------------------------------------------------------ void GB_Global_free_function_set (void (* free_function) (void *)) { GB_Global.free_function = free_function ; } void GB_Global_free_function (void *p) { if (GB_Global.malloc_is_thread_safe) { GB_Global.free_function (p) ; } else { #pragma omp critical(GB_malloc_protection) { GB_Global.free_function (p) ; } } } //------------------------------------------------------------------------------ // malloc_is_thread_safe //------------------------------------------------------------------------------ GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only void GB_Global_malloc_is_thread_safe_set (bool malloc_is_thread_safe) { GB_Global.malloc_is_thread_safe = malloc_is_thread_safe ; } GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only bool GB_Global_malloc_is_thread_safe_get (void) { return (GB_Global.malloc_is_thread_safe) ; } //------------------------------------------------------------------------------ // malloc_tracking //------------------------------------------------------------------------------ GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only void GB_Global_malloc_tracking_set (bool malloc_tracking) { GB_Global.malloc_tracking = malloc_tracking ; } bool GB_Global_malloc_tracking_get (void) { return (GB_Global.malloc_tracking) ; } //------------------------------------------------------------------------------ // nmalloc //------------------------------------------------------------------------------ void GB_Global_nmalloc_clear (void) { GB_ATOMIC_WRITE GB_Global.nmalloc = 0 ; } GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only int64_t GB_Global_nmalloc_get (void) { int64_t nmalloc ; GB_ATOMIC_READ nmalloc = GB_Global.nmalloc ; return (nmalloc) ; } void GB_Global_nmalloc_increment (void) { GB_ATOMIC_UPDATE GB_Global.nmalloc++ ; } GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only void GB_Global_nmalloc_decrement (void) { GB_ATOMIC_UPDATE GB_Global.nmalloc-- ; } //------------------------------------------------------------------------------ // malloc_debug //------------------------------------------------------------------------------ GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only void GB_Global_malloc_debug_set (bool malloc_debug) { GB_ATOMIC_WRITE GB_Global.malloc_debug = malloc_debug ; } bool GB_Global_malloc_debug_get (void) { bool malloc_debug ; GB_ATOMIC_READ malloc_debug = GB_Global.malloc_debug ; return (malloc_debug) ; } //------------------------------------------------------------------------------ // malloc_debug_count //------------------------------------------------------------------------------ GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only void GB_Global_malloc_debug_count_set (int64_t malloc_debug_count) { GB_ATOMIC_WRITE GB_Global.malloc_debug_count = malloc_debug_count ; } bool GB_Global_malloc_debug_count_decrement (void) { GB_ATOMIC_UPDATE GB_Global.malloc_debug_count-- ; int64_t malloc_debug_count ; GB_ATOMIC_READ malloc_debug_count = GB_Global.malloc_debug_count ; return (malloc_debug_count <= 0) ; } //------------------------------------------------------------------------------ // hack: for setting an internal value for development only //------------------------------------------------------------------------------ GB_PUBLIC // accessed by the MATLAB tests in 
GraphBLAS/Test only void GB_Global_hack_set (int64_t hack) { GB_Global.hack = hack ; } GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only int64_t GB_Global_hack_get (void) { return (GB_Global.hack) ; } //------------------------------------------------------------------------------ // burble: for controlling the burble output //------------------------------------------------------------------------------ void GB_Global_burble_set (bool burble) { GB_Global.burble = burble ; } GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only bool GB_Global_burble_get (void) { return (GB_Global.burble) ; } //------------------------------------------------------------------------------ // for MATLAB interface only //------------------------------------------------------------------------------ GB_PUBLIC // accessed by the MATLAB interface only void GB_Global_print_one_based_set (bool onebased) { GB_Global.print_one_based = onebased ; } GB_PUBLIC // accessed by the MATLAB interface only bool GB_Global_print_one_based_get (void) { return (GB_Global.print_one_based) ; } //------------------------------------------------------------------------------ // CUDA (DRAFT: in progress) //------------------------------------------------------------------------------ void GB_Global_gpu_control_set (GrB_Desc_Value gpu_control) { // set the GPU control to always, never, or default if (GB_Global.gpu_count > 0) { // one or more GPUs are available: set gpu_control to // always, never, or default. if (gpu_control == GxB_GPU_ALWAYS || gpu_control == GxB_GPU_NEVER) { GB_Global.gpu_control = gpu_control ; } else { GB_Global.gpu_control = GxB_DEFAULT ; } } else { // no GPUs available: never use a GPU GB_Global.gpu_control = GxB_GPU_NEVER ; } } GrB_Desc_Value GB_Global_gpu_control_get (void) { // get the GPU control parameter return (GB_Global.gpu_control) ; } void GB_Global_gpu_chunk_set (double gpu_chunk) { // set the GPU chunk factor if (gpu_chunk < 1) gpu_chunk = GB_GPU_CHUNK_DEFAULT ; GB_Global.gpu_chunk = gpu_chunk ; } double GB_Global_gpu_chunk_get (void) { // get the GPU chunk factor return (GB_Global.gpu_chunk) ; } bool GB_Global_gpu_count_set (bool enable_cuda) { // set the # of GPUs in the system; // this function is only called once, by GB_init. #if defined ( GBCUDA ) if (enable_cuda) { return (GB_cuda_get_device_count (&GB_Global.gpu_count)) ; } else #endif { // no GPUs available, or available but not requested GB_Global.gpu_count = 0 ; return (true) ; } } int GB_Global_gpu_count_get (void) { // get the # of GPUs in the system return (GB_Global.gpu_count) ; } #define GB_GPU_DEVICE_CHECK(error) \ if (device < 0 || device >= GB_Global.gpu_count) return (error) ; size_t GB_Global_gpu_memorysize_get (int device) { // get the memory size of a specific GPU GB_GPU_DEVICE_CHECK (0) ; // memory size zero if invalid GPU return (GB_Global.gpu_properties [device].total_global_memory) ; } int GB_Global_gpu_sm_get (int device) { // get the # of SMs in a specific GPU GB_GPU_DEVICE_CHECK (0) ; // zero if invalid GPU return (GB_Global.gpu_properties [device].number_of_sms) ; } bool GB_Global_gpu_device_properties_get (int device) { // get all properties of a specific GPU; // this function is only called once per GPU, by GB_init. GB_GPU_DEVICE_CHECK (false) ; // fail if invalid GPU #if defined ( GBCUDA ) return (GB_cuda_get_device_properties (device, &(GB_Global.gpu_properties [device]))) ; #else // if no GPUs exist, they cannot be queried return (false) ; #endif }
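/* Illustrative sketch only (not part of GraphBLAS): a hypothetical test
   driver showing how the nmalloc leak counter described above is meant to be
   used.  In the library itself the counter is maintained by GB_malloc_memory
   and GB_free_memory (defined elsewhere), so the wrappers here bump it by
   hand; the names leak_checked_malloc/free and no_leaks are assumptions of
   this sketch, which also assumes it is compiled in the context of this
   file (for bool, size_t, and the GB_Global_* accessors defined above). */

static void *leak_checked_malloc (size_t size)
{
    void *p = GB_Global_malloc_function (size) ;
    if (p != NULL) GB_Global_nmalloc_increment ( ) ;    // one more live block
    return (p) ;
}

static void leak_checked_free (void *p)
{
    if (p != NULL)
    {
        GB_Global_free_function (p) ;
        GB_Global_nmalloc_decrement ( ) ;               // one fewer live block
    }
}

static bool no_leaks (void)
{
    // with tracking enabled, a nonzero count means some block was never freed
    GB_Global_malloc_tracking_set (true) ;
    void *p = leak_checked_malloc (64) ;
    leak_checked_free (p) ;
    return (GB_Global_nmalloc_get ( ) == 0) ;
}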
nukedclan_fmt_plug.c
/* Nuked-Klan CMS DB cracker patch for JtR. Hacked together during * July of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. * * Input Format => user:$nk$*HASHKEY*hash * * Where, * * HASHKEY => hex(HASHKEY value found in conf.inc.php) * * Modified by JimF, Jul 2012. About 6x speed improvements. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_nk; #elif FMT_REGISTERS_H john_register_one(&fmt_nk); #else #include <string.h> #include "arch.h" #include "md5.h" #include "sha.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "common.h" #ifdef _OPENMP #include <omp.h> // Tuned on core i7 quad HT // 1 5059K // 16 8507k // 64 8907k ** this was chosen. // 128 8914k // 256 8810k #ifndef OMP_SCALE #define OMP_SCALE 64 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "nk" #define FORMAT_NAME "Nuked-Klan CMS" #define FORMAT_TAG "$nk$*" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "SHA1 MD5 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 /* change to 0 once there's any speedup for "many salts" */ #define PLAINTEXT_LENGTH 32 #define CIPHERTEXT_LENGTH (4+32+40+3+1) #define BINARY_SIZE 16 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN sizeof(uint32_t) #define SALT_ALIGN sizeof(int) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 64 static struct fmt_tests nk_tests[] = { {"$nk$*379637b4fcde21b2c5fbc9a00af505e997443267*#17737d3661312121d5ae7d5c6156c0298", "openwall"}, {"$nk$*379637b4fcde21b2c5fbc9a00af505e997443267*#5c20384512ee36590f5f0ab38a46c6ced", "password"}, // from pass_gen.pl {"$nk$*503476424c5362476f36463630796a6e6c656165*#2f27c20e65b88b76c913115cdec3d9a18", "test1"}, {"$nk$*7a317a71794339586c434d50506b6e4356626a67*#b62a615f605c2fd520edde76577d30f90", "thatsworking"}, {"$nk$*796b7375666d7545695032413769443977644132*#4aec90bd9a930faaa42a0d7d40056132e", "test3"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static struct custom_salt { unsigned char HASHKEY[41]; int decal; } *cur_salt; inline static void hex_encode(unsigned char *str, int len, unsigned char *out) { int i; for (i = 0; i < len; ++i) { out[0] = itoa16[str[i]>>4]; out[1] = itoa16[str[i]&0xF]; out += 2; } } static void init(struct fmt_main *self) { #ifdef _OPENMP static int omp_t = 1; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[CIPHERTEXT_LENGTH + 1]; memcpy(out, ciphertext, CIPHERTEXT_LENGTH); out[CIPHERTEXT_LENGTH] = 0; strlwr(out); return out; } static int valid(char *ciphertext, struct fmt_main *self) { char *ptr, *ctcopy, *keeptr; int extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; if (!(ctcopy = strdup(ciphertext))) return 0; keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; /* skip leading "$nk$*" */ if (!(ptr = strtokm(ctcopy, "*"))) goto 
error; /* HASHKEY is of fixed length 40 */ if (hexlenl(ptr, &extra) != 40 || extra) goto error; if (!(ptr = strtokm(NULL, "*"))) goto error; /* skip two characters, for "nk_tests[]" this is '#' * followed by decal value */ if (strlen(ptr) <= 2) goto error; ptr += 2; /* hash is of fixed length 32 */ if (hexlenl(ptr, &extra) != 32 || extra) goto error; MEM_FREE(keeptr); return 1; error: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { static struct custom_salt cs; char _ctcopy[256], *ctcopy=_ctcopy; char *p; int i; strnzcpy(ctcopy, ciphertext, 255); ctcopy += FORMAT_TAG_LEN; /* skip over "$nk$*" */ p = strtokm(ctcopy, "*"); for (i = 0; i < 20; i++) cs.HASHKEY[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; p = strtokm(NULL, "*"); cs.decal = atoi16[ARCH_INDEX(p[1])]; return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE+1]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '*') + 1 + 2; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { unsigned char pass[40+1]; unsigned char out[80]; int i, k; int idx = 0; MD5_CTX c; SHA_CTX ctx; SHA1_Init(&ctx); SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index])); SHA1_Final(out, &ctx); hex_encode(out, 20, pass); for (i = 0, k=cur_salt->decal; i < 40; ++i, ++k) { out[idx++] = pass[i]; if (k>19) k = 0; out[idx++] = cur_salt->HASHKEY[k]; } MD5_Init(&c); MD5_Update(&c, out, 80); MD5_Final((unsigned char*)crypt_out[index], &c); } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (*((uint32_t*)binary) == crypt_out[index][0]) return 1; return 0; } static int cmp_one(void *binary, int index) { return *((uint32_t*)binary) == crypt_out[index][0]; } static int cmp_exact(char *source, int index) { void *binary = get_binary(source); return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static void nk_set_key(char *key, int index) { strcpy(saved_key[index], key); } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_nk = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE, { NULL }, { FORMAT_TAG }, nk_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, 
fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, nk_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
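/*
 * Illustrative sketch only, not used by the format above: the interleaving
 * step from crypt_all(), pulled out on its own.  It assumes pass[] already
 * holds the 40 hex digits of sha1(password) and hashkey[] the 20 raw bytes
 * decoded from the HASHKEY field; the caller then takes MD5 of the 80-byte
 * result, which is what gets compared against the stored hash.  The name
 * nk_interleave_sketch is an assumption of this sketch.
 */
static void nk_interleave_sketch(const unsigned char pass[40],
                                 const unsigned char hashkey[20],
                                 int decal, unsigned char out[80])
{
	int i, k, idx = 0;

	for (i = 0, k = decal; i < 40; ++i, ++k) {
		out[idx++] = pass[i];      /* next hex digit of sha1(password) */
		if (k > 19)
			k = 0;             /* wrap around the 20-byte HASHKEY */
		out[idx++] = hashkey[k];   /* next HASHKEY byte, offset by decal */
	}
}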
GB_binop__isne_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isne_int64) // A.*B function (eWiseMult): GB (_AemultB_01__isne_int64) // A.*B function (eWiseMult): GB (_AemultB_02__isne_int64) // A.*B function (eWiseMult): GB (_AemultB_03__isne_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int64) // A*D function (colscale): GB (_AxD__isne_int64) // D*A function (rowscale): GB (_DxB__isne_int64) // C+=B function (dense accum): GB (_Cdense_accumB__isne_int64) // C+=b function (dense accum): GB (_Cdense_accumb__isne_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int64) // C=scalar+B GB (_bind1st__isne_int64) // C=scalar+B' GB (_bind1st_tran__isne_int64) // C=A+scalar GB (_bind2nd__isne_int64) // C=A'+scalar GB (_bind2nd_tran__isne_int64) // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x != y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_INT64 || GxB_NO_ISNE_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isne_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isne_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isne_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isne_int64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isne_int64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isne_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isne_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isne_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isne_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isne_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isne_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isne_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB (_bind1st_tran__isne_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB (_bind2nd_tran__isne_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
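/* Illustrative sketch only (not a GraphBLAS entry point): what the _bind2nd
   kernel above computes for ISNE on a plain dense int64 array, with the
   iso/bitmap handling and the OpenMP pragma stripped out.  The name
   isne_bind2nd_dense is an assumption of this sketch. */

#include <stdint.h>

static void isne_bind2nd_dense (int64_t *Cx, const int64_t *Ax,
    int64_t y, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // ISNE: 1 if the entries differ, 0 if they are equal
        Cx [p] = (int64_t) (Ax [p] != y) ;
    }
}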
argon2_fmt_plug.c
/* * This software is Copyright (c) 2016 Agnieszka Bielec <bielecagnieszka8 at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * merged argon2d and argon2i into a single format file. JimF. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_argon2; #elif FMT_REGISTERS_H john_register_one(&fmt_argon2); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #include "options.h" #include "argon2.h" #include "argon2_core.h" #include "argon2_encoding.h" #include "memdbg.h" #define FORMAT_LABEL "argon2" #define FORMAT_NAME "" #define FORMAT_TAG_d "$argon2d$" #define FORMAT_TAG_i "$argon2i$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG_d)-1) #if defined(__XOP__) #define ALGORITHM_NAME "Blake2 XOP" #elif defined(__AVX__) #define ALGORITHM_NAME "Blake2 AVX" #elif defined(__SSSE3__) #define ALGORITHM_NAME "Blake2 SSSE3" #elif defined(__SSE2__) #define ALGORITHM_NAME "Blake2 SSE2" #else #define ALGORITHM_NAME "Blake2" #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 100 //only in john #define BINARY_SIZE 256 //only in john #define BINARY_ALIGN sizeof(uint32_t) #define SALT_SIZE 64 //only in john #define SALT_ALIGN sizeof(uint32_t) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define OMP_SCALE 16 #ifdef _OPENMP #define THREAD_NUMBER omp_get_thread_num() #else #define THREAD_NUMBER 1 #endif static struct fmt_tests tests[] = { {"$argon2d$v=19$m=4096,t=3,p=1$ZGFtYWdlX2RvbmU$w9w3s5/zV8+PcAZlJhnTCOE+vBkZssmZf6jOq3dKv50","password"}, {"$argon2i$v=19$m=4096,t=3,p=1$ZGFtYWdlX2RvbmU$N59QwnpxDQZRj1/cO6bqm408dD6Z2Z9LKYpwFJSPVKA","password"}, {"$argon2d$v=19$m=4096,t=3,p=1$c2hvcnRfc2FsdA$zMrTcOAOUje6UqObRVh84Pe1K6gumcDqqGzRM0ILzYmj","sacrificed"}, {"$argon2i$v=19$m=4096,t=3,p=1$c2hvcnRfc2FsdA$1l4kAwUdAApoCbFH7ghBEf7bsdrOQzE4axIJ3PV0Ncrd","sacrificed"}, {"$argon2d$v=19$m=16384,t=3,p=1$c2hvcnRfc2FsdA$TLSTPihIo+5F67Y1vJdfWdB9","blessed_dead"}, {"$argon2i$v=19$m=16384,t=3,p=1$c2hvcnRfc2FsdA$vvjDVog22A5x9eljmB+2yC8y","blessed_dead"}, {"$argon2d$v=19$m=16384,t=4,p=3$YW5vdGhlcl9zYWx0$yw93eMxC8REPAwbQ0e/q43jR9+RI9HI/DHP75uzm7tQfjU734oaI3dzcMWjYjHzVQD+J4+MG+7oyD8dN/PtnmPCZs+UZ67E+rkXJ/wTvY4WgXgAdGtJRrAGxhy4rD7d5G+dCpqhrog","death_dying"}, {"$argon2i$v=19$m=16384,t=4,p=3$YW5vdGhlcl9zYWx0$K7unxwO5aeuZCpnIJ06FMCRKod3eRg8oIRzQrK3E6mGbyqlTvvl47jeDWq/5drF1COJkEF9Ty7FWXJZHa+vqlf2YZGp/4qSlAvKmdtJ/6JZU32iQItzMRwcfujHE+PBjbL5uz4966A","death_dying"}, {NULL} }; struct argon2_salt { uint32_t t_cost, m_cost, lanes; uint32_t hash_size; uint32_t salt_length; char salt[SALT_SIZE]; argon2_type type; }; static struct argon2_salt saved_salt; static region_t * memory; static void **pseudo_rands; static char *saved_key; static int threads; static size_t saved_mem_size; static uint32_t saved_segment_length; static unsigned char *crypted; static void *get_salt(char *ciphertext); static void init(struct fmt_main *self) { int i; #ifdef _OPENMP int omp_t = omp_get_max_threads(); threads=omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #else threads=1; #endif saved_key = malloc(self->params.max_keys_per_crypt * (PLAINTEXT_LENGTH + 1)); memset(saved_key, 0, self->params.max_keys_per_crypt * (PLAINTEXT_LENGTH + 1)); crypted = malloc(self->params.max_keys_per_crypt * (BINARY_SIZE)); memset(crypted, 0, 
self->params.max_keys_per_crypt * (BINARY_SIZE)); memory=malloc(threads*sizeof(region_t)); pseudo_rands=malloc(threads*sizeof(void*)); for (i=0;i<threads;i++) { init_region_t(&memory[i]); pseudo_rands[i]=NULL; } saved_mem_size=0; saved_segment_length=0; } static void done(void) { int i; free(saved_key); free(crypted); for (i=0;i<threads;i++) { free_region_t(&memory[i]); free(pseudo_rands[i]); } free(memory); free(pseudo_rands); } static void print_memory(double memory) { char s[]="\0kMGT"; int i=0; while(memory>=1024) { memory/=1024; i++; } printf("memory per hash : %.2lf %cB\n",memory,s[i]); } static void reset(struct db_main *db) { static int printed=0; if (!printed && options.verbosity > VERB_LEGACY) { int i; uint32_t m_cost, prev_m_cost; m_cost=prev_m_cost=0; if (!db) { for (i = 0; tests[i].ciphertext; i++) { struct argon2_salt *salt; salt=get_salt(tests[i].ciphertext); m_cost = MAX(m_cost, salt->m_cost); if (i==0) { printf("\n"); prev_m_cost=m_cost; print_memory(sizeof(block)*m_cost); } } if (prev_m_cost!=m_cost) { printf("max "); print_memory(sizeof(block)*m_cost); } } else { struct db_salt *salts = db->salts; while (salts != NULL) { struct argon2_salt * salt=salts->salt; m_cost = MAX(m_cost, salt->m_cost); salts = salts->next; } printf("\n"); print_memory(sizeof(block)*m_cost); } } } static void ctx_init(argon2_context *ctx) { //size_t maxadlen = ctx->adlen; //size_t maxsaltlen = ctx->saltlen; //size_t maxoutlen = ctx->outlen; static uint8_t out[BINARY_SIZE]; static uint8_t salt[SALT_SIZE]; ctx->adlen=0; ctx->saltlen=SALT_SIZE; ctx->outlen=BINARY_SIZE; ctx->out=out; ctx->salt=salt; } static int valid(char *ciphertext, struct fmt_main *self) { argon2_context ctx; int res; ctx_init(&ctx); if (!strncmp(ciphertext, FORMAT_TAG_d, FORMAT_TAG_LEN)) res=argon2_decode_string(&ctx, ciphertext, Argon2_d); else if (!strncmp(ciphertext, FORMAT_TAG_i, FORMAT_TAG_LEN)) res=argon2_decode_string(&ctx, ciphertext, Argon2_i); else return 0; if (res!=ARGON2_OK || ctx.outlen < 8) return 0; return 1; } static void set_key(char *key, int index) { int len; len = strlen(key); if (len > PLAINTEXT_LENGTH) len = PLAINTEXT_LENGTH; memcpy(saved_key + index * (PLAINTEXT_LENGTH + 1), key, len); saved_key[index * (PLAINTEXT_LENGTH + 1) + len] = 0; } static char *get_key(int index) { return saved_key + index * (PLAINTEXT_LENGTH + 1); } static void *get_binary(char *ciphertext) { static char out[BINARY_SIZE]; argon2_context ctx; ctx_init(&ctx); if (!strncmp(ciphertext, FORMAT_TAG_d, FORMAT_TAG_LEN)) argon2_decode_string(&ctx, ciphertext, Argon2_d); else argon2_decode_string(&ctx, ciphertext, Argon2_i); memset(out, 0, BINARY_SIZE); memcpy(out, ctx.out, ctx.outlen); return out; } static void *get_salt(char *ciphertext) { static struct argon2_salt salt; argon2_context ctx; memset(&salt,0,sizeof(salt)); ctx_init(&ctx); if (!strncmp(ciphertext, FORMAT_TAG_d, FORMAT_TAG_LEN)) { argon2_decode_string(&ctx, ciphertext, Argon2_d); salt.type = Argon2_d; } else { argon2_decode_string(&ctx, ciphertext, Argon2_i); salt.type = Argon2_i; } salt.salt_length = ctx.saltlen; salt.m_cost = ctx.m_cost; salt.t_cost = ctx.t_cost; salt.lanes = ctx.lanes; salt.hash_size = ctx.outlen; memcpy(salt.salt, ctx.salt, ctx.saltlen); return (void *)&salt; } static void set_salt(void *salt) { uint32_t i; size_t mem_size; uint32_t segment_length, memory_blocks; memcpy(&saved_salt,salt,sizeof(struct argon2_salt)); mem_size=sizeof(block)*saved_salt.m_cost; memory_blocks = saved_salt.m_cost; if (memory_blocks < 2 * ARGON2_SYNC_POINTS * saved_salt.lanes) { 
memory_blocks = 2 * ARGON2_SYNC_POINTS * saved_salt.lanes; } segment_length = memory_blocks / (saved_salt.lanes * ARGON2_SYNC_POINTS); if (mem_size>saved_mem_size) { if (saved_mem_size>0) for (i=0;i<threads;i++) free_region_t(&memory[i]); for (i=0;i<threads;i++) alloc_region_t(&memory[i],mem_size); saved_mem_size=mem_size; } if (segment_length>saved_segment_length) { if (saved_segment_length>0) for (i=0;i<threads;i++) free(pseudo_rands[i]); for (i=0;i<threads;i++) pseudo_rands[i]=malloc(sizeof(uint64_t) * segment_length); saved_segment_length=segment_length; } } static int cmp_all(void *binary, int count) { int i; for (i = 0; i < count; i++) { if (!memcmp(binary, crypted + i * BINARY_SIZE, saved_salt.hash_size)) return 1; } return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypted + index * BINARY_SIZE, saved_salt.hash_size); } static int cmp_exact(char *source, int index) { return 1; } static int crypt_all(int *pcount, struct db_salt *salt) { int i; const int count = *pcount; #ifdef _OPENMP #pragma omp parallel for #endif for (i = 0; i < count; i++) { argon2_hash(saved_salt.t_cost, saved_salt.m_cost, saved_salt.lanes, saved_key + i * (PLAINTEXT_LENGTH + 1), strlen(saved_key + i * (PLAINTEXT_LENGTH + 1)), saved_salt.salt, saved_salt.salt_length, crypted + i * BINARY_SIZE, saved_salt.hash_size, 0, 0, saved_salt.type, ARGON2_VERSION_NUMBER, memory[THREAD_NUMBER%threads].aligned, pseudo_rands[THREAD_NUMBER%threads]); } return count; } static int get_hash_0(int index) { uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE); return crypt[0] & PH_MASK_0; } static int get_hash_1(int index) { uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE); return crypt[0] & PH_MASK_1; } static int get_hash_2(int index) { uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE); return crypt[0] & PH_MASK_2; } static int get_hash_3(int index) { uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE); return crypt[0] & PH_MASK_3; } static int get_hash_4(int index) { uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE); return crypt[0] & PH_MASK_4; } static int get_hash_5(int index) { uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE); return crypt[0] & PH_MASK_5; } static int get_hash_6(int index) { uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE); return crypt[0] & PH_MASK_6; } static int salt_hash(void *_salt) { int i; struct argon2_salt *salt = (struct argon2_salt*)_salt; unsigned int hash = 0; char *p = salt->salt; for (i=0;i<salt->salt_length;i++) { hash <<= 1; hash += (unsigned char)*p++; if (hash >> SALT_HASH_LOG) { hash ^= hash >> SALT_HASH_LOG; hash &= (SALT_HASH_SIZE - 1); } } hash ^= hash >> SALT_HASH_LOG; hash &= (SALT_HASH_SIZE - 1); return hash; } #if FMT_MAIN_VERSION > 11 static unsigned int tunable_cost_t(void *_salt) { struct argon2_salt *salt=(struct argon2_salt *)_salt; return salt->t_cost; } static unsigned int tunable_cost_m(void *_salt) { struct argon2_salt *salt=(struct argon2_salt *)_salt; return salt->m_cost; } static unsigned int tunable_cost_p(void *_salt) { struct argon2_salt *salt=(struct argon2_salt *)_salt; return salt->lanes; } static unsigned int tunable_cost_type(void *_salt) { struct argon2_salt *salt=(struct argon2_salt *)_salt; return (int)salt->type; } #endif struct fmt_main fmt_argon2 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, sizeof(struct argon2_salt), SALT_ALIGN, MIN_KEYS_PER_CRYPT, 
MAX_KEYS_PER_CRYPT, #ifdef _OPENMP FMT_OMP | #endif FMT_CASE | FMT_8_BIT, { "t", "m", "p", "type [0:Argon2d 1:Argon2i]" }, {0}, tests }, { init, done, reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { tunable_cost_t, tunable_cost_m, tunable_cost_p, tunable_cost_type, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif
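/*
 * Illustrative sketch only, not called by the format above: the sizing
 * arithmetic from set_salt(), worked through with the parameters of the
 * first test vector (m=4096, p=1).  ARGON2_SYNC_POINTS is assumed here to be
 * 4, its value in the reference Argon2 implementation; it is not defined in
 * this file, and argon2_sizing_sketch is a name invented for this sketch.
 */
static unsigned int argon2_sizing_sketch(void)
{
	unsigned int m_cost = 4096, lanes = 1, sync_points = 4;
	unsigned int memory_blocks = m_cost;

	if (memory_blocks < 2 * sync_points * lanes)
		memory_blocks = 2 * sync_points * lanes; /* at least 8 blocks per lane */

	/* 4096 / (1 * 4) = 1024: pseudo_rands[] gets 1024 uint64_t entries and
	   memory[] gets m_cost blocks of scratch space per thread */
	return memory_blocks / (lanes * sync_points);
}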
GB_unop__identity_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__(none)) // op(A') function: GB (_unop_tran__identity_fp64_fp64) // C type: double // A type: double // cast: double cij = aij // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ #if 0 GrB_Info GB (_unop_apply__(none)) ( double *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fp64_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
WeightedMeanFilter.h
/* * ErosionFilter.h * * Created on: 13.06.2016 * Author: Darius Malysiak */ #ifndef IMAGEPROCESSING_WEIGHTEDMEANFILTER_H_ #define IMAGEPROCESSING_WEIGHTEDMEANFILTER_H_ #include "../BaseObject.h" #include "../DataStructures/Matrix.h" #include "../DataStructures/Image.h" namespace Lazarus { /** * This filter will compute a weighted mean of a pixels region and assign this mean to the pixel. * */ template<typename T> class WeightedMeanFilter: public Lazarus::BaseObject { public: static const Lazarus::Matrix2<double>* get_HOM_MEAN_3x3_KERNEL(double val) { static Lazarus::Matrix2<double> _EROSION_KERNEL; _EROSION_KERNEL.initMatrix(3,3); _EROSION_KERNEL.setData(0,0,val); _EROSION_KERNEL.setData(0,1,val); _EROSION_KERNEL.setData(0,2,val); _EROSION_KERNEL.setData(1,0,val); _EROSION_KERNEL.setData(1,1,val); _EROSION_KERNEL.setData(1,2,val); _EROSION_KERNEL.setData(2,0,val); _EROSION_KERNEL.setData(2,1,val); _EROSION_KERNEL.setData(2,2,val); return &_EROSION_KERNEL; } static const Lazarus::Matrix2<double>* get_HOM_MEAN_KERNEL(double val, unsigned int size) { Lazarus::Matrix2<double>* _EROSION_KERNEL = new Lazarus::Matrix2<double>(); _EROSION_KERNEL->initMatrix(size,size); _EROSION_KERNEL->globalSetMatrixVal(val); return _EROSION_KERNEL; } WeightedMeanFilter() { mp_filter_mask = NULL; } virtual ~WeightedMeanFilter(){} void setKernel(const Lazarus::Matrix2<double>* filter) { this->mp_filter_mask = filter; } /** * We assume a kernel with odd dimensions. The erosion will be computed on an extended image with black borders * such that the kernel can be positioned onto the first image pixel. * Returns the filtered image in case of success otherwise NULL. **/ Lazarus::Image<T>* filterImage( Lazarus::Image<T>* image) { unsigned int offset_x = (mp_filter_mask->getColumnCount()-1)/2; unsigned int offset_y = (mp_filter_mask->getRowCount()-1)/2; unsigned int image_width = image->getm_width(); unsigned int image_heigth = image->getm_height(); unsigned int channel_count = image->getm_channel_count(); unsigned int filter_width = mp_filter_mask->getColumnCount(); unsigned int filter_height = mp_filter_mask->getRowCount(); if(filter_width % 2 != 1) { printf("filter width %d is not odd\n",filter_width); return NULL; } if(filter_height % 2 != 1) { printf("filter height %d is not odd\n",filter_height); return NULL; } Lazarus::Image<T>* output = new Lazarus::Image<T>( image_width, image_heigth, image->getm_data_alignment() ); Lazarus::Image<T>* temporary = new Lazarus::Image<T>( image_width + 2*offset_x, image_heigth + 2*offset_y, image->getm_data_alignment() ); //fill the output and temp image with black Lazarus::FastKTuple<T> color(channel_count); for(unsigned int i=0; i< channel_count; i++) { color.setElement(i,0); } output->fillImageFast( &color ); temporary->fillImageFast( &color ); //copy the input image into the temp buffer; for(unsigned int i=0; i<image_width; i++) { for(unsigned int j=0; j<image_heigth; j++) { image->getPixelFast( &color,i,j ); temporary->setPixelFast(&color,offset_x + i,offset_y + j); } } //start the convolution process //over every pixel //unsigned int c_limit = std::max(channel_count,(unsigned int)3); unsigned int c_limit = 0; if(channel_count > 3) c_limit = 3; else c_limit = channel_count; #pragma omp parallel for for(unsigned int i=offset_x; i<image_width+(offset_x); i++) { double filter_value = 0; double average = 0; Lazarus::FastKTuple<T> new_color(channel_count); Lazarus::FastKTuple<T> color_(channel_count); for(unsigned int j=offset_y; j<image_heigth+(offset_y); j++) { //over every 
color channel for(unsigned int c=0; c<c_limit; c++) { //erosion for(int k=-offset_x; k<=(int)offset_x; ++k) { for(int l=-offset_y; l<=(int)offset_y; ++l) { temporary->getPixelFast(&color_, (unsigned int)((int)i+k), (unsigned int)((int)j+l)); filter_value = mp_filter_mask->getData((unsigned int)((int)offset_x+k), (unsigned int)((int)offset_y+l) ); average += (color_.getElement(c) * filter_value); } } new_color.setElement(c,(T)(average/(filter_width*filter_height))); average = 0; } //preserve the alpha value if(channel_count>3) { new_color.setElement(3,color_.getElement(3)); } output->setPixelFast(&new_color,i-(offset_x),j-(offset_y)); } } //delete the temporary image delete temporary; return output; } private: const Lazarus::Matrix2<double>* mp_filter_mask; }; } #endif /* IMAGEPROCESSING_WEIGHTEDMEANFILTER_H_ */
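/* Simplified standalone C sketch of the weighted-mean idea implemented by the
 * template class above, reduced to a single grey channel.  Each output pixel
 * is the kernel-weighted mean of its neighbourhood, with pixels outside the
 * image treated as black (the class achieves this by copying into a padded
 * temporary image) and the same 1/(kw*kh) normalisation as filterImage().
 * The function name and the flat row-major buffer layout are assumptions of
 * this sketch. */
static void weighted_mean_gray(const double *src, double *dst,
                               unsigned int w, unsigned int h,
                               const double *kernel, unsigned int ksize)
{
	int off = (int)(ksize - 1) / 2;          /* kernel size must be odd */

	for (unsigned int y = 0; y < h; ++y) {
		for (unsigned int x = 0; x < w; ++x) {
			double acc = 0.0;
			for (int ky = -off; ky <= off; ++ky) {
				for (int kx = -off; kx <= off; ++kx) {
					int sx = (int)x + kx, sy = (int)y + ky;
					double v = 0.0;  /* outside the image counts as black */
					if (sx >= 0 && sx < (int)w && sy >= 0 && sy < (int)h)
						v = src[(unsigned int)sy * w + (unsigned int)sx];
					acc += v * kernel[(unsigned int)(ky + off) * ksize
					                  + (unsigned int)(kx + off)];
				}
			}
			dst[y * w + x] = acc / (double)(ksize * ksize);
		}
	}
}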
Example_nowait.1.c
/*
 * @@name: nowait.1c
 * @@type: C
 * @@compilable: yes
 * @@linkable: no
 * @@expect: success
 */
#include <math.h>

void nowait_example(int n, int m, float *a, float *b, float *y, float *z)
{
   int i;
   #pragma omp parallel
   {
      #pragma omp for nowait
      for (i=1; i<n; i++)
         b[i] = (a[i] + a[i-1]) / 2.0;

      #pragma omp for nowait
      for (i=0; i<m; i++)
         y[i] = sqrt(z[i]);
   }
}
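/* Note added for clarity: the "nowait" clauses remove the implied barrier at
   the end of each worksharing loop, so a thread that finishes its share of
   the first loop may start on the second immediately.  That is safe here
   because the two loops touch disjoint arrays (b/a versus y/z). */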
GB_binop__pow_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__pow_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__pow_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__pow_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__pow_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint64) // C=scalar+B GB (_bind1st__pow_uint64) // C=scalar+B' GB (_bind1st_tran__pow_uint64) // C=A+scalar GB (_bind2nd__pow_uint64) // C=A'+scalar GB (_bind2nd_tran__pow_uint64) // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = GB_pow_uint64 (aij, bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_pow_uint64 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_UINT64 || GxB_NO_POW_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__pow_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; 
return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__pow_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__pow_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_pow_uint64 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_pow_uint64 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint64 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const 
int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint64 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
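For readers tracing the generated kernels above: every function in this file applies GB_pow_uint64 elementwise, and that operator is defined elsewhere in the GraphBLAS source, not here. The fragment below is only a self-contained sketch of the kind of per-entry work the bind1st/bind2nd kernels do; demo_pow_uint64 is a stand-in and may differ from the real GB_pow_uint64 in edge cases such as overflow handling.

#include <stdint.h>

// stand-in for GB_pow_uint64: exponentiation by squaring, wrapping on overflow
static inline uint64_t demo_pow_uint64 (uint64_t x, uint64_t y)
{
    uint64_t z = 1 ;
    while (y > 0)
    {
        if (y & 1) z *= x ;     // fold in the current bit of the exponent
        x *= x ;
        y >>= 1 ;
    }
    return (z) ;
}

// shape of the bind2nd kernel with the bitmap test omitted:
// Cx [p] = pow (Ax [p], y) for every entry p (bind1st swaps the operands)
static void demo_bind2nd_pow_uint64 (uint64_t *Cx, const uint64_t *Ax,
    uint64_t y, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        Cx [p] = demo_pow_uint64 (Ax [p], y) ;
    }
}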
zq_cnn_convolution_gemm_nchwc_kernel1x1_neon_raw.h
#define __ARMV8 1 static void conv1x1s1_sgemm_transform_kernel_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch) { const float* kernel = _kernel; // interleave #if __ARMV8 kernel_tm.create(4 * 8, inch / 4 + inch % 4, outch / 8 + (outch % 8) / 4 + outch % 4); #else kernel_tm.create(4 * 4, inch / 4 + inch % 4, outch / 4 + outch % 4); #endif // __ARM_NEON && __ARMV8 int p = 0; #if __ARMV8 for (; p + 7<outch; p += 8) { const float* kernel0 = kernel + (p + 0)*inch; const float* kernel1 = kernel + (p + 1)*inch; const float* kernel2 = kernel + (p + 2)*inch; const float* kernel3 = kernel + (p + 3)*inch; const float* kernel4 = kernel + (p + 4)*inch; const float* kernel5 = kernel + (p + 5)*inch; const float* kernel6 = kernel + (p + 6)*inch; const float* kernel7 = kernel + (p + 7)*inch; float* ktmp = kernel_tm.channel(p / 8); for (int q = 0; q<inch; q++) { // kernel0...7 0 ktmp[0] = kernel0[0]; ktmp[1] = kernel1[0]; ktmp[2] = kernel2[0]; ktmp[3] = kernel3[0]; ktmp[4] = kernel4[0]; ktmp[5] = kernel5[0]; ktmp[6] = kernel6[0]; ktmp[7] = kernel7[0]; ktmp += 8; kernel0 += 1; kernel1 += 1; kernel2 += 1; kernel3 += 1; kernel4 += 1; kernel5 += 1; kernel6 += 1; kernel7 += 1; } } #endif // __ARMV8 for (; p + 3<outch; p += 4) { const float* kernel0 = kernel + (p + 0)*inch; const float* kernel1 = kernel + (p + 1)*inch; const float* kernel2 = kernel + (p + 2)*inch; const float* kernel3 = kernel + (p + 3)*inch; #if __ARMV8 float* ktmp = kernel_tm.channel(p / 8 + (p % 8) / 4); #else float* ktmp = kernel_tm.channel(p / 4); #endif // __ARMV8 for (int q = 0; q<inch; q++) { // kernel0...3 0 ktmp[0] = kernel0[0]; ktmp[1] = kernel1[0]; ktmp[2] = kernel2[0]; ktmp[3] = kernel3[0]; ktmp += 4; kernel0 += 1; kernel1 += 1; kernel2 += 1; kernel3 += 1; } } for (; p<outch; p++) { const float* kernel0 = kernel + p*inch; #if __ARMV8 float* ktmp = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4); #else float* ktmp = kernel_tm.channel(p / 4 + p % 4); #endif //__ARMV8 for (int q = 0; q<inch; q++) { ktmp[0] = kernel0[0]; ktmp++; kernel0++; } } } static void conv1x1s1_sgemm_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; const int size = w * h; const float* bias = _bias; // interleave Mat tmp(8 * 4, inch / 4 + inch % 4, size / 8 + (size % 8) / 4 + size % 4, 4u, opt.workspace_allocator); { int nn_size = size >> 3; int remain_size_start = nn_size << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii<nn_size; ii++) { int i = ii * 8; const float* img0 = bottom_blob.channel(0); img0 += i; float* tmpptr = tmp.channel(i / 8); for (int q = 0; q<inch; q++) { #if __ARMV8 vst1q_f32(tmpptr, vld1q_f32(img0)); vst1q_f32(tmpptr + 4, vld1q_f32(img0 + 4)); tmpptr += 8; img0 += bottom_blob.cstep; #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0 :128] \n" "vst1.f32 {d0-d3}, [%1 :128]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1" ); img0 += bottom_blob.cstep; #endif // __ARMV8 } } nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii<nn_size; ii++) { int i = remain_size_start + ii * 4; const float* img0 = bottom_blob.channel(0); img0 += i; float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q<inch; q++) { #if __ARMV8 vst1q_f32(tmpptr, vld1q_f32(img0)); tmpptr += 4; img0 += bottom_blob.cstep; #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0" ); img0 += bottom_blob.cstep; #endif // __ARMV8 } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i<size; i++) { const float* img0 = bottom_blob.channel(0); img0 += i; float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); for (int q = 0; q<inch; q++) { tmpptr[0] = img0[0]; tmpptr++; img0 += bottom_blob.cstep; } } } int nn_outch = 0; int remain_outch_start = 0; #if __ARMV8 nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp<nn_outch; pp++) { int p = pp * 8; float* outptr0 = top_blob.channel(p); float* outptr1 = top_blob.channel(p + 1); float* outptr2 = top_blob.channel(p + 2); float* outptr3 = top_blob.channel(p + 3); float* outptr4 = top_blob.channel(p + 4); float* outptr5 = top_blob.channel(p + 5); float* outptr6 = top_blob.channel(p + 6); float* outptr7 = top_blob.channel(p + 7); const float zeros[8] = { 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f }; const float* biasptr = bias ? bias + p : zeros; int i = 0; for (; i + 7<size; i += 8) { const float* tmpptr = tmp.channel(i / 8); const float* kptr = kernel.channel(p / 8); asm volatile( "ld1 {v0.4s, v1.4s}, [%20] \n" "dup v16.4s, v0.s[0] \n" "dup v17.4s, v0.s[0] \n" "dup v18.4s, v0.s[1] \n" "dup v19.4s, v0.s[1] \n" "dup v20.4s, v0.s[2] \n" "dup v21.4s, v0.s[2] \n" "dup v22.4s, v0.s[3] \n" "dup v23.4s, v0.s[3] \n" "dup v24.4s, v1.s[0] \n" "dup v25.4s, v1.s[0] \n" "dup v26.4s, v1.s[1] \n" "dup v27.4s, v1.s[1] \n" "dup v28.4s, v1.s[2] \n" "dup v29.4s, v1.s[2] \n" "dup v30.4s, v1.s[3] \n" "dup v31.4s, v1.s[3] \n" // inch loop "lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla v22.4s, v8.4s, v0.s[3] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, v1.s[2] \n" "fmla v30.4s, v8.4s, v1.s[3] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n" "fmla v16.4s, v10.4s, v2.s[0] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v20.4s, v10.4s, v2.s[2] \n" "fmla v22.4s, v10.4s, v2.s[3] \n" "fmla v17.4s, v11.4s, v2.s[0] \n" "fmla v19.4s, v11.4s, v2.s[1] \n" "fmla v21.4s, v11.4s, v2.s[2] \n" "fmla v23.4s, v11.4s, v2.s[3] \n" "fmla v24.4s, v10.4s, v3.s[0] \n" "fmla v26.4s, 
v10.4s, v3.s[1] \n" "fmla v28.4s, v10.4s, v3.s[2] \n" "fmla v30.4s, v10.4s, v3.s[3] \n" "fmla v25.4s, v11.4s, v3.s[0] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v29.4s, v11.4s, v3.s[2] \n" "fmla v31.4s, v11.4s, v3.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "fmla v16.4s, v12.4s, v4.s[0] \n" "fmla v18.4s, v12.4s, v4.s[1] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v22.4s, v12.4s, v4.s[3] \n" "fmla v17.4s, v13.4s, v4.s[0] \n" "fmla v19.4s, v13.4s, v4.s[1] \n" "fmla v21.4s, v13.4s, v4.s[2] \n" "fmla v23.4s, v13.4s, v4.s[3] \n" "fmla v24.4s, v12.4s, v5.s[0] \n" "fmla v26.4s, v12.4s, v5.s[1] \n" "fmla v28.4s, v12.4s, v5.s[2] \n" "fmla v30.4s, v12.4s, v5.s[3] \n" "fmla v25.4s, v13.4s, v5.s[0] \n" "fmla v27.4s, v13.4s, v5.s[1] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v31.4s, v13.4s, v5.s[3] \n" "subs w4, w4, #1 \n" "fmla v16.4s, v14.4s, v6.s[0] \n" "fmla v18.4s, v14.4s, v6.s[1] \n" "fmla v20.4s, v14.4s, v6.s[2] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v17.4s, v15.4s, v6.s[0] \n" "fmla v19.4s, v15.4s, v6.s[1] \n" "fmla v21.4s, v15.4s, v6.s[2] \n" "fmla v23.4s, v15.4s, v6.s[3] \n" "fmla v24.4s, v14.4s, v7.s[0] \n" "fmla v26.4s, v14.4s, v7.s[1] \n" "fmla v28.4s, v14.4s, v7.s[2] \n" "fmla v30.4s, v14.4s, v7.s[3] \n" "fmla v25.4s, v15.4s, v7.s[0] \n" "fmla v27.4s, v15.4s, v7.s[1] \n" "fmla v29.4s, v15.4s, v7.s[2] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w21, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v8.4s, v9.4s}, [%8], #32 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla v22.4s, v8.4s, v0.s[3] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, v1.s[2] \n" "fmla v30.4s, v8.4s, v1.s[3] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0], #32 \n" "st1 {v18.4s, v19.4s}, [%1], #32 \n" "st1 {v20.4s, v21.4s}, [%2], #32 \n" "st1 {v22.4s, v23.4s}, [%3], #32 \n" "st1 {v24.4s, v25.4s}, [%4], #32 \n" "st1 {v26.4s, v27.4s}, [%5], #32 \n" "st1 {v28.4s, v29.4s}, [%6], #32 \n" "st1 {v30.4s, v31.4s}, [%7], #32 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } for (; i + 3<size; i += 4) { const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const float* kptr = kernel.channel(p / 8); asm volatile( "ld1 {v0.4s, v1.4s}, [%20] \n" "dup v16.4s, v0.s[0] \n" "dup v17.4s, v0.s[1] \n" "dup v18.4s, v0.s[2] \n" "dup v19.4s, v0.s[3] \n" "dup v20.4s, v1.s[0] \n" "dup v21.4s, v1.s[1] \n" "dup v22.4s, v1.s[2] \n" "dup v23.4s, v1.s[3] \n" // inch loop "lsr w4, 
%w21, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "fmla v16.4s, v9.4s, v2.s[0] \n" "fmla v17.4s, v9.4s, v2.s[1] \n" "fmla v18.4s, v9.4s, v2.s[2] \n" "fmla v19.4s, v9.4s, v2.s[3] \n" "fmla v20.4s, v9.4s, v3.s[0] \n" "fmla v21.4s, v9.4s, v3.s[1] \n" "fmla v22.4s, v9.4s, v3.s[2] \n" "fmla v23.4s, v9.4s, v3.s[3] \n" "subs w4, w4, #1 \n" "fmla v16.4s, v10.4s, v4.s[0] \n" "fmla v17.4s, v10.4s, v4.s[1] \n" "fmla v18.4s, v10.4s, v4.s[2] \n" "fmla v19.4s, v10.4s, v4.s[3] \n" "fmla v20.4s, v10.4s, v5.s[0] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v5.s[2] \n" "fmla v23.4s, v10.4s, v5.s[3] \n" "fmla v16.4s, v11.4s, v6.s[0] \n" "fmla v17.4s, v11.4s, v6.s[1] \n" "fmla v18.4s, v11.4s, v6.s[2] \n" "fmla v19.4s, v11.4s, v6.s[3] \n" "fmla v20.4s, v11.4s, v7.s[0] \n" "fmla v21.4s, v11.4s, v7.s[1] \n" "fmla v22.4s, v11.4s, v7.s[2] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w21, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v8.4s}, [%8], #16 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "st1 {v20.4s}, [%4], #16 \n" "st1 {v21.4s}, [%5], #16 \n" "st1 {v22.4s}, [%6], #16 \n" "st1 {v23.4s}, [%7], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); } for (; i<size; i++) { const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const float* kptr = kernel.channel(p / 8); asm volatile( "ld1 {v24.4s, v25.4s}, [%20] \n" // inch loop "lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v8.4s}, [%8], #16 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v0.4s, v8.s[0] \n" "fmla v17.4s, v1.4s, v8.s[0] \n" "fmla v18.4s, v2.4s, v8.s[1] \n" "fmla v19.4s, v3.4s, v8.s[1] \n" 
"prfm pldl1keep, [%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "subs w4, w4, #1 \n" "fmla v20.4s, v4.4s, v8.s[2] \n" "fmla v21.4s, v5.4s, v8.s[2] \n" "fmla v22.4s, v6.4s, v8.s[3] \n" "fmla v23.4s, v7.4s, v8.s[3] \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "fadd v16.4s, v16.4s, v20.4s \n" "fadd v17.4s, v17.4s, v21.4s \n" "fadd v24.4s, v24.4s, v16.4s \n" "fadd v25.4s, v25.4s, v17.4s \n" "1: \n" // remain loop "and w4, %w21, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #32] \n" "ld1r {v8.4s}, [%8], #4 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "subs w4, w4, #1 \n" "fmla v24.4s, v8.4s, v0.4s \n" "fmla v25.4s, v8.4s, v1.4s \n" "bne 2b \n" "3: \n" "st1 {v24.s}[0],[%0], #4 \n" "st1 {v24.s}[1],[%1], #4 \n" "st1 {v24.s}[2],[%2], #4 \n" "st1 {v24.s}[3],[%3], #4 \n" "st1 {v25.s}[0],[%4], #4 \n" "st1 {v25.s}[1],[%5], #4 \n" "st1 {v25.s}[2],[%6], #4 \n" "st1 {v25.s}[3],[%7], #4 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(biasptr), // %20 "r"(inch) // %21 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25" ); } } #endif // __ARMV8 nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* outptr0 = top_blob.channel(p); float* outptr1 = top_blob.channel(p + 1); float* outptr2 = top_blob.channel(p + 2); float* outptr3 = top_blob.channel(p + 3); const float zeros[4] = { 0.f, 0.f, 0.f, 0.f }; const float* biasptr = bias ? 
bias + p : zeros; int i = 0; for (; i + 7<size; i += 8) { const float* tmpptr = tmp.channel(i / 8); #if __ARMV8 const float* kptr = kernel.channel(p / 8 + (p % 8) / 4); #else const float* kptr = kernel.channel(p / 4); #endif // __ARMV8 #if __ARMV8 asm volatile( "ld1 {v0.4s}, [%12] \n" "dup v8.4s, v0.s[0] \n" "dup v9.4s, v0.s[0] \n" "dup v10.4s, v0.s[1] \n" "dup v11.4s, v0.s[1] \n" "dup v12.4s, v0.s[2] \n" "dup v13.4s, v0.s[2] \n" "dup v14.4s, v0.s[3] \n" "dup v15.4s, v0.s[3] \n" // inch loop "lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v10.4s, v4.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v10.4s, v6.4s, v1.s[1] \n" "fmla v12.4s, v6.4s, v1.s[2] \n" "fmla v14.4s, v6.4s, v1.s[3] \n" "fmla v9.4s, v7.4s, v1.s[0] \n" "fmla v11.4s, v7.4s, v1.s[1] \n" "fmla v13.4s, v7.4s, v1.s[2] \n" "fmla v15.4s, v7.4s, v1.s[3] \n" "subs w4, w4, #1 \n" "fmla v8.4s, v16.4s, v2.s[0] \n" "fmla v10.4s, v16.4s, v2.s[1] \n" "fmla v12.4s, v16.4s, v2.s[2] \n" "fmla v14.4s, v16.4s, v2.s[3] \n" "fmla v9.4s, v17.4s, v2.s[0] \n" "fmla v11.4s, v17.4s, v2.s[1] \n" "fmla v13.4s, v17.4s, v2.s[2] \n" "fmla v15.4s, v17.4s, v2.s[3] \n" "fmla v8.4s, v18.4s, v3.s[0] \n" "fmla v10.4s, v18.4s, v3.s[1] \n" "fmla v12.4s, v18.4s, v3.s[2] \n" "fmla v14.4s, v18.4s, v3.s[3] \n" "fmla v9.4s, v19.4s, v3.s[0] \n" "fmla v11.4s, v19.4s, v3.s[1] \n" "fmla v13.4s, v19.4s, v3.s[2] \n" "fmla v15.4s, v19.4s, v3.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w13, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4s, v5.4s}, [%4], #32 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v10.4s, v4.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "bne 2b \n" "3: \n" "st1 {v8.4s, v9.4s}, [%0], #32 \n" "st1 {v10.4s, v11.4s}, [%1], #32 \n" "st1 {v12.4s, v13.4s}, [%2], #32 \n" "st1 {v14.4s, v15.4s}, [%3], #32 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); #else // __ARMV8 asm volatile( "vld1.f32 {d0-d1}, [%12] \n" "vdup.f32 q8, d0[0] \n" "vdup.f32 q9, d0[0] \n" "vdup.f32 q10, d0[1] \n" "vdup.f32 q11, d0[1] \n" "vdup.f32 q12, d1[0] \n" "vdup.f32 q13, d1[0] \n" "vdup.f32 q14, d1[1] \n" "vdup.f32 q15, d1[1] \n" // inch loop "lsr r4, %13, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! 
\n" // "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q15, q5, d1[1] \n" "vmla.f32 q8, q6, d2[0] \n" "vmla.f32 q10, q6, d2[1] \n" "vmla.f32 q12, q6, d3[0] \n" "vmla.f32 q14, q6, d3[1] \n" "vmla.f32 q9, q7, d2[0] \n" "vmla.f32 q11, q7, d2[1] \n" "vmla.f32 q13, q7, d3[0] \n" "vmla.f32 q15, q7, d3[1] \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "vmla.f32 q8, q4, d4[0] \n" "vmla.f32 q10, q4, d4[1] \n" "vmla.f32 q12, q4, d5[0] \n" "vmla.f32 q14, q4, d5[1] \n" "vmla.f32 q9, q5, d4[0] \n" "vmla.f32 q11, q5, d4[1] \n" "vmla.f32 q13, q5, d5[0] \n" "vmla.f32 q15, q5, d5[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d6[0] \n" "vmla.f32 q10, q6, d6[1] \n" "vmla.f32 q12, q6, d7[0] \n" "vmla.f32 q14, q6, d7[1] \n" "vmla.f32 q9, q7, d6[0] \n" "vmla.f32 q11, q7, d6[1] \n" "vmla.f32 q13, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %13, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #256] \n" "vld1.f32 {d8-d11}, [%4 :128]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q15, q5, d1[1] \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" "vst1.f32 {d24-d27}, [%2 :128]! \n" "vst1.f32 {d28-d31}, [%3 :128]! \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __ARMV8 } for (; i + 3<size; i += 4) { const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); #if __ARMV8 const float* kptr = kernel.channel(p / 8 + (p % 8) / 4); #else const float* kptr = kernel.channel(p / 4); #endif // __ARMV8 #if __ARMV8 asm volatile( "ld1 {v0.4s}, [%12] \n" "dup v8.4s, v0.s[0] \n" "dup v9.4s, v0.s[1] \n" "dup v10.4s, v0.s[2] \n" "dup v11.4s, v0.s[3] \n" // inch loop "lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v8.4s, v5.4s, v1.s[0] \n" "fmla v9.4s, v5.4s, v1.s[1] \n" "fmla v10.4s, v5.4s, v1.s[2] \n" "fmla v11.4s, v5.4s, v1.s[3] \n" "subs w4, w4, #1 \n" "fmla v8.4s, v6.4s, v2.s[0] \n" "fmla v9.4s, v6.4s, v2.s[1] \n" "fmla v10.4s, v6.4s, v2.s[2] \n" "fmla v11.4s, v6.4s, v2.s[3] \n" "fmla v8.4s, v7.4s, v3.s[0] \n" "fmla v9.4s, v7.4s, v3.s[1] \n" "fmla v10.4s, v7.4s, v3.s[2] \n" "fmla v11.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w13, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4s}, [%4], #16 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "subs w4, w4, 
#1 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "bne 2b \n" "3: \n" "st1 {v8.4s}, [%0], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v10.4s}, [%2], #16 \n" "st1 {v11.4s}, [%3], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11" ); #else // __ARMV8 asm volatile( "vld1.f32 {d0-d1}, [%12] \n" "vdup.f32 q8, d0[0] \n" "vdup.f32 q9, d0[1] \n" "vdup.f32 q10, d1[0] \n" "vdup.f32 q11, d1[1] \n" // inch loop "lsr r4, %13, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! \n" // "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q8, q5, d2[0] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d3[0] \n" "vmla.f32 q11, q5, d3[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d4[0] \n" "vmla.f32 q9, q6, d4[1] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d5[1] \n" "vmla.f32 q8, q7, d6[0] \n" "vmla.f32 q9, q7, d6[1] \n" "vmla.f32 q10, q7, d7[0] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %13, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4 :128]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11" ); #endif // __ARMV8 } for (; i<size; i++) { const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); #if __ARMV8 const float* kptr = kernel.channel(p / 8 + (p % 8) / 4); #else const float* kptr = kernel.channel(p / 4); #endif // __ARMV8 #if __ARMV8 asm volatile( "ld1 {v12.4s}, [%12] \n" // inch loop "lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4s}, [%4], #16 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[1] \n" "fmla v10.4s, v2.4s, v4.s[2] \n" "fmla v11.4s, v3.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "fadd v12.4s, v12.4s, v8.4s \n" "1: \n" // remain loop "and w4, %w13, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #32] \n" "ld1r {v4.4s}, [%4], #4 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "subs w4, w4, #1 \n" "fmla v12.4s, v4.4s, v0.4s \n" "bne 2b \n" "3: \n" "st1 {v12.s}[0], [%0], #4 \n" "st1 {v12.s}[1], [%1], #4 \n" "st1 {v12.s}[2], [%2], #4 \n" "st1 {v12.s}[3], [%3], #4 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12" ); #else // __ARMV8 asm volatile( "vld1.f32 {d24-d25}, [%12] \n" // inch loop "lsr r4, %13, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "veor q10, q10, q10 \n" "veor q11, q11, q11 \n" "0: \n" "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4 :128]! \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! \n" // "vld1.f32 {d4-d7}, [%5 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[1] \n" "vmla.f32 q10, q2, d9[0] \n" "vmla.f32 q11, q3, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q12, q12, q8 \n" "1: \n" // remain loop "and r4, %13, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #32] \n" "vld1.f32 {d8[],d9[]}, [%4]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q12, q4, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d24[0]}, [%0]! \n" "vst1.f32 {d24[1]}, [%1]! \n" "vst1.f32 {d25[0]}, [%2]! \n" "vst1.f32 {d25[1]}, [%3]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(biasptr), // %12 "r"(inch) // %13 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12" ); #endif // __ARMV8 } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p<outch; p++) { Mat out0 = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; float* outptr0 = out0; int i = 0; for (; i + 7<size; i += 8) { const float* tmpptr = tmp.channel(i / 8); #if __ARMV8 const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4); #else const float* kptr = kernel.channel(p / 4 + p % 4); #endif // __ARMV8 #if __ARMV8 asm volatile( "dup v8.4s, %w6 \n" "dup v9.4s, %w6 \n" // inch loop "lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "fmla v8.4s, v6.4s, v0.s[1] \n" "fmla v9.4s, v7.4s, v0.s[1] \n" "subs w4, w4, #1 \n" "fmla v8.4s, v12.4s, v0.s[2] \n" "fmla v9.4s, v13.4s, v0.s[2] \n" "fmla v8.4s, v14.4s, v0.s[3] \n" "fmla v9.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w7, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v4.4s, v5.4s}, [%1], #32 \n" "prfm pldl1keep, [%2, #32] \n" "ld1r {v0.4s}, [%2], #4 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.4s \n" "fmla v9.4s, v5.4s, v0.4s \n" "bne 2b \n" "3: \n" "st1 {v8.4s, v9.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15" ); #else // __ARMV8 asm volatile( "vdup.f32 q8, %6 \n" "vdup.f32 q9, %6 \n" // inch loop "lsr r4, %7, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%1 :128]! \n" // "vld1.f32 {d12-d15}, [%1 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "pld [%1, #512] \n" "vldm %1!, {d24-d31} \n" // "vld1.f32 {d24-d27}, [%1 :128]! \n" // "vld1.f32 {d28-d31}, [%1 :128]! \n" "vmla.f32 q8, q6, d0[1] \n" "vmla.f32 q9, q7, d0[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q12, d1[0] \n" "vmla.f32 q9, q13, d1[0] \n" "vmla.f32 q8, q14, d1[1] \n" "vmla.f32 q9, q15, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %7, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #256] \n" "vld1.f32 {d8-d11}, [%1 :128]! \n" "pld [%2, #32] \n" "vld1.f32 {d0[],d1[]}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "vmla.f32 q9, q5, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15" ); #endif // __ARMV8 } for (; i + 3<size; i += 4) { const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); #if __ARMV8 const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4); #else const float* kptr = kernel.channel(p / 4 + p % 4); #endif // __ARMV8 #if __ARMV8 asm volatile( "dup v8.4s, %w6 \n" // inch loop "lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v8.4s, v5.4s, v0.s[1] \n" "fmla v8.4s, v6.4s, v0.s[2] \n" "fmla v8.4s, v7.4s, v0.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w7, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v4.4s}, [%1], #16 \n" "prfm pldl1keep, [%2, #32] \n" "ld1r {v0.4s}, [%2], #4 \n" "subs w4, w4, #1 \n" "fmla v8.4s, v4.4s, v0.4s \n" "bne 2b \n" "3: \n" "st1 {v8.4s}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8" ); #else // __ARMV8 asm volatile( "vdup.f32 q8, %6 \n" // inch loop "lsr r4, %7, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%1 :128]! \n" // "vld1.f32 {d12-d15}, [%1 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %7, #3 \n"// r4 = remain = inch & 3; "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #128] \n" "vld1.f32 {d8-d9}, [%1 :128]! \n" "pld [%2, #32] \n" "vld1.f32 {d0[],d1[]}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(bias0), // %6 "r"(inch) // %7 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8" ); #endif // __ARMV8 } for (; i<size; i++) { const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); #if __ARMV8 const float* kptr = kernel.channel(p / 8 + (p % 8) / 4 + p % 4); #else const float* kptr = kernel.channel(p / 4 + p % 4); #endif // __ARMV8 int q = 0; float32x4_t _sum0 = vdupq_n_f32(0.f); for (; q + 3<inch; q += 4) { float32x4_t _p0 = vld1q_f32(tmpptr); tmpptr += 4; float32x4_t _k0 = vld1q_f32(kptr); kptr += 4; #if __ARMV8 _sum0 = vfmaq_f32(_sum0, _p0, _k0); #else _sum0 = vmlaq_f32(_sum0, _p0, _k0); #endif } #if __ARMV8 float sum0 = bias0 + vaddvq_f32(_sum0); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float sum0 = bias0 + vget_lane_f32(vpadd_f32(_ss, _ss), 0); #endif for (; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = sum0; outptr0++; } } }
bfecc_convection.h
// KRATOS ___ ___ _ ___ __ ___ ___ ___ ___ // / __/ _ \| \| \ \ / /__| \_ _| __| __| // | (_| (_) | .` |\ V /___| |) | || _|| _| // \___\___/|_|\_| \_/ |___/___|_| |_| APPLICATION // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // #if !defined(KRATOS_BFECC_CONVECTION_INCLUDED ) #define KRATOS_BFECC_CONVECTION_INCLUDED #define PRESSURE_ON_EULERIAN_MESH #define USE_FEW_PARTICLES // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "utilities/geometry_utilities.h" #include "geometries/tetrahedra_3d_4.h" #include "includes/variables.h" #include "utilities/timer.h" #include "utilities/binbased_fast_point_locator.h" #include "utilities/openmp_utils.h" #include "processes/compute_nodal_gradient_process.h" #include "utilities/parallel_utilities.h" #include "utilities/pointer_communicator.h" #include "utilities/pointer_map_communicator.h" namespace Kratos { template<std::size_t TDim> class BFECCConvection { public: KRATOS_CLASS_POINTER_DEFINITION(BFECCConvection<TDim>); BFECCConvection( typename BinBasedFastPointLocator<TDim>::Pointer pSearchStructure, const bool PartialDt = false, const bool ActivateLimiter = false) : mpSearchStructure(pSearchStructure), mPartialDt(PartialDt), mActivateLimiter(ActivateLimiter) { } ~BFECCConvection() { } //********************************************************************************************** //********************************************************************************************** void BFECCconvect( ModelPart& rModelPart, const Variable< double >& rVar, const Variable<array_1d<double,3> >& conv_var, const double substeps) { KRATOS_TRY double dt_factor = 1.0; if (mPartialDt){ dt_factor = rModelPart.GetProcessInfo()[DELTA_TIME_FACTOR]; } KRATOS_ERROR_IF(dt_factor < 1.0e-2) << "ERROR: DELTA_TIME_FACTOR should be larger than zero." 
<<std::endl; const double dt = dt_factor*rModelPart.GetProcessInfo()[DELTA_TIME]; //do movement Vector N(TDim + 1); Vector N_valid(TDim + 1); const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rModelPart.Nodes().size(); PointerVector< Element > elem_backward( rModelPart.Nodes().size()); std::vector< Vector > Ns( rModelPart.Nodes().size()); std::vector< bool > found( rModelPart.Nodes().size()); // Allocate non-historical variables and update old velocity as per dt_factor block_for_each(rModelPart.Nodes(), [&](Node<3>& rNode){ rNode.SetValue(rVar, 0.0); auto &r_old_velocity = rNode.FastGetSolutionStepValue(conv_var, 1); rNode.SetValue(conv_var, r_old_velocity); noalias(r_old_velocity) = dt_factor*r_old_velocity + (1.0 - dt_factor)*rNode.FastGetSolutionStepValue(conv_var); }); mLimiter.resize(nparticles); if (mActivateLimiter){ CalculateLimiter(rModelPart, rVar); } else{ for (int i = 0; i < nparticles; i++){ mLimiter[i] = 1.0; } } //FIRST LOOP: estimate rVar(n+1) #pragma omp parallel for firstprivate(results,N,N_valid) for (int i = 0; i < nparticles; i++) { typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); ModelPart::NodesContainerType::iterator it_particle = rModelPart.NodesBegin() + i; Element::Pointer pelement; Element::Pointer pelement_valid; array_1d<double,3> bckPos = it_particle->Coordinates(); const array_1d<double,3>& vel = it_particle->FastGetSolutionStepValue(conv_var); bool has_valid_elem_pointer = false; bool is_found = ConvectBySubstepping(dt,bckPos,vel, N,N_valid, pelement,pelement_valid, result_begin, max_results, -1.0, substeps, conv_var, has_valid_elem_pointer); found[i] = is_found; if(is_found) { //save position backwards elem_backward(i) = pelement; Ns[i] = N; Geometry< Node < 3 > >& geom = pelement->GetGeometry(); double phi1 = N[0] * ( geom[0].FastGetSolutionStepValue(rVar,1)); for (unsigned int k = 1; k < geom.size(); k++) { phi1 += N[k] * ( geom[k].FastGetSolutionStepValue(rVar,1) ); } it_particle->FastGetSolutionStepValue(rVar) = phi1; } else if(has_valid_elem_pointer) { //save position backwards elem_backward(i) = pelement_valid; Ns[i] = N_valid; Geometry< Node < 3 > >& geom = pelement_valid->GetGeometry(); double phi1 = N[0] * ( geom[0].FastGetSolutionStepValue(rVar,1)); for (unsigned int k = 1; k < geom.size(); k++) { phi1 += N_valid[k] * ( geom[k].FastGetSolutionStepValue(rVar,1) ); } it_particle->FastGetSolutionStepValue(rVar) = phi1; } } //now obtain the value AT TIME STEP N by taking it from N+1 #pragma omp parallel for firstprivate(results,N,N_valid) for (int i = 0; i < nparticles; i++) { typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); ModelPart::NodesContainerType::iterator it_particle = rModelPart.NodesBegin() + i; Element::Pointer pelement; Element::Pointer pelement_valid; array_1d<double,3> fwdPos = it_particle->Coordinates(); const array_1d<double,3>& vel = it_particle->FastGetSolutionStepValue(conv_var,1); bool has_valid_elem_pointer = false; bool is_found = ConvectBySubstepping(dt,fwdPos,vel, N, N_valid, pelement, pelement_valid, result_begin, max_results, 1.0, substeps, conv_var,has_valid_elem_pointer); if(is_found) { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); double phi_old = N[0] * ( geom[0].FastGetSolutionStepValue(rVar)); for (unsigned int k = 1; k < geom.size(); k++) { phi_old += N[k] * ( geom[k].FastGetSolutionStepValue(rVar) ); } //store correction const auto 
limiter_factor = 0.5*mLimiter[i]; it_particle->GetValue(rVar) = (1.0 + limiter_factor)*it_particle->FastGetSolutionStepValue(rVar,1) - limiter_factor*phi_old; // iparticle->FastGetSolutionStepValue(rVar) = iparticle->GetValue(rVar) - 0.5 * (phi2 - iparticle->FastGetSolutionStepValue(rVar,1)); } else { it_particle->GetValue(rVar) = it_particle->FastGetSolutionStepValue(rVar,1); } } #pragma omp parallel for for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator it_particle = rModelPart.NodesBegin() + i; bool is_found = found[i]; if(is_found) { Vector N = Ns[i]; Geometry< Node < 3 > >& geom = elem_backward[i].GetGeometry(); double phi1 = N[0] * ( geom[0].GetValue(rVar)); for (unsigned int k = 1; k < geom.size(); k++) { phi1 += N[k] * ( geom[k].GetValue(rVar) ); } it_particle->FastGetSolutionStepValue(rVar) = phi1; } // else // std::cout << "it should find it" << std::endl; it_particle->FastGetSolutionStepValue(conv_var, 1) = it_particle->GetValue(conv_var); // Restoring the old velocity } KRATOS_CATCH("") } bool ConvectBySubstepping( const double dt, array_1d<double,3>& position, //IT WILL BE MODIFIED const array_1d<double,3>& initial_velocity, Vector& N, Vector& N_valid, Element::Pointer& pelement, Element::Pointer& pelement_valid, typename BinBasedFastPointLocator<TDim>::ResultIteratorType& result_begin, const unsigned int max_results, const double velocity_sign, const double subdivisions, const Variable<array_1d<double,3> >& conv_var, bool& has_valid_elem_pointer) { bool is_found = false; array_1d<double,3> veulerian; const double small_dt = dt/subdivisions; if(velocity_sign > 0.0) //going from the past to the future { noalias(position) += small_dt*initial_velocity; unsigned int substep=0; while(substep++ < subdivisions) { is_found = mpSearchStructure->FindPointOnMesh(position, N, pelement, result_begin, max_results); if (is_found == true) { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); const double new_step_factor = static_cast<double>(substep)/subdivisions; const double old_step_factor = (1.0 - new_step_factor); noalias(veulerian) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(conv_var) + old_step_factor*geom[0].FastGetSolutionStepValue(conv_var,1)); for (unsigned int k = 1; k < geom.size(); k++) noalias(veulerian) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(conv_var) + old_step_factor*geom[k].FastGetSolutionStepValue(conv_var,1) ); noalias(position) += small_dt*veulerian; N_valid = N; pelement_valid = pelement; has_valid_elem_pointer = true; } else break; } } else //going from the future to the past { noalias(position) -= small_dt*initial_velocity; unsigned int substep=0; while(substep++ < subdivisions) { is_found = mpSearchStructure->FindPointOnMesh(position, N, pelement, result_begin, max_results); if (is_found == true) { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); //this factors get inverted from the other case const double old_step_factor = static_cast<double>(substep)/subdivisions; const double new_step_factor = (1.0 - old_step_factor); noalias(veulerian) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(conv_var) + old_step_factor*geom[0].FastGetSolutionStepValue(conv_var,1)); for (unsigned int k = 1; k < geom.size(); k++) noalias(veulerian) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(conv_var) + old_step_factor*geom[k].FastGetSolutionStepValue(conv_var,1) ); noalias(position) -= small_dt*veulerian; N_valid = N; pelement_valid = pelement; has_valid_elem_pointer = true; } else 
break; } } return is_found; } // ************************************************************************************************************ // See [Kuzmin et al., Comput. Methods Appl. Mech. Engrg., 322 (2017) 23–41] for more info about this limiter // Befor calling make sure that non-historical variable "DISTANCE_GRADIENT" contains the nodal gradient of rVar void CalculateLimiter( ModelPart& rModelPart, const Variable< double >& rVar) { const double epsilon = 1.0e-15; const double power = 2.0; const int nparticles = rModelPart.Nodes().size(); if(static_cast<int>(mSigmaPlus.size()) != nparticles){ mSigmaPlus.resize(nparticles); mSigmaMinus.resize(nparticles); } auto& r_default_comm = rModelPart.GetCommunicator().GetDataCommunicator(); GlobalPointersVector< Node<3 > > gp_list; for (int i_node = 0; i_node < static_cast<int>(rModelPart.NumberOfNodes()); ++i_node){ auto it_node = rModelPart.NodesBegin() + i_node; GlobalPointersVector< Node<3 > >& global_pointer_list = it_node->GetValue(NEIGHBOUR_NODES); for (unsigned int j = 0; j< global_pointer_list.size(); ++j) { auto& global_pointer = global_pointer_list(j); gp_list.push_back(global_pointer); } } GlobalPointerCommunicator< Node<3 > > pointer_comm(r_default_comm, gp_list); auto coordinate_proxy = pointer_comm.Apply( [](GlobalPointer<Node<3> >& global_pointer) -> Point::CoordinatesArrayType { return global_pointer->Coordinates(); } ); auto distance_proxy = pointer_comm.Apply( [&](GlobalPointer<Node<3> >& global_pointer) -> double { return global_pointer->FastGetSolutionStepValue(rVar); } ); IndexPartition<int>(nparticles).for_each( [&](int i_node){ auto it_node = rModelPart.NodesBegin() + i_node; const auto& X_i = it_node->Coordinates(); const auto& grad_i = it_node->GetValue(DISTANCE_GRADIENT); double S_plus = 0.0; double S_minus = 0.0; GlobalPointersVector< Node<3 > >& global_pointer_list = it_node->GetValue(NEIGHBOUR_NODES); for (unsigned int j = 0; j< global_pointer_list.size(); ++j) { /* if (it_node->Id() == j_node->Id()) continue; */ auto& global_pointer = global_pointer_list(j); auto X_j = coordinate_proxy.Get(global_pointer); S_plus += std::max(0.0, inner_prod(grad_i, X_i-X_j)); S_minus += std::min(0.0, inner_prod(grad_i, X_i-X_j)); } mSigmaPlus[i_node] = std::min(1.0, (std::abs(S_minus)+epsilon)/(S_plus+epsilon)); mSigmaMinus[i_node] = std::min(1.0, (S_plus+epsilon)/(std::abs(S_minus)+epsilon)); } ); IndexPartition<int>(nparticles).for_each( [&](int i_node){ auto it_node = rModelPart.NodesBegin() + i_node; const double distance_i = it_node->FastGetSolutionStepValue(rVar); const auto& X_i = it_node->Coordinates(); const auto& grad_i = it_node->GetValue(DISTANCE_GRADIENT); double numerator = 0.0; double denominator = 0.0; GlobalPointersVector< Node<3 > >& global_pointer_list = it_node->GetValue(NEIGHBOUR_NODES); for (unsigned int j = 0; j< global_pointer_list.size(); ++j) { /* if (it_node->Id() == j_node->Id()) continue; */ auto& global_pointer = global_pointer_list(j); auto X_j = coordinate_proxy.Get(global_pointer); const double distance_j = distance_proxy.Get(global_pointer); double beta_ij = 1.0; if (inner_prod(grad_i, X_i-X_j) > 0) beta_ij = mSigmaPlus[i_node]; else if (inner_prod(grad_i, X_i-X_j) < 0) beta_ij = mSigmaMinus[i_node]; numerator += beta_ij*(distance_i - distance_j); denominator += beta_ij*std::abs(distance_i - distance_j); } const double fraction = (std::abs(numerator)/* +epsilon */) / (denominator + epsilon); mLimiter[i_node] = 1.0 - std::pow(fraction, power); } ); } void ResetBoundaryConditions(ModelPart& 
rModelPart, const Variable< double >& rVar) { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = rModelPart.NodesBegin(); vector<int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, rModelPart.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; if (inode->IsFixed(rVar)) { inode->FastGetSolutionStepValue(rVar)=inode->GetSolutionStepValue(rVar,1); } } } KRATOS_CATCH("") } void CopyScalarVarToPreviousTimeStep(ModelPart& rModelPart, const Variable< double >& rVar) { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = rModelPart.NodesBegin(); vector<int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, rModelPart.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->GetSolutionStepValue(rVar,1) = inode->FastGetSolutionStepValue(rVar); } } KRATOS_CATCH("") } protected: Kratos::Vector mSigmaPlus, mSigmaMinus, mLimiter; private: typename BinBasedFastPointLocator<TDim>::Pointer mpSearchStructure; const bool mPartialDt; const bool mActivateLimiter; }; } // namespace Kratos. #endif // KRATOS_BFECC_CONVECTION_INCLUDED defined
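To make the control flow of BFECCconvect easier to follow, here is a minimal 1D sketch of the back-and-forth error compensation it implements, on a periodic uniform grid with constant velocity. It is purely illustrative and assumes nothing from Kratos: the class above additionally handles unstructured meshes, velocity interpolation, substepping, and the Kuzmin-style limiter (the 0.5 factor below corresponds to a limiter value of 1).

#include <cmath>
#include <cstddef>
#include <vector>

// semi-Lagrangian step: interpolate phi at the departure point x = i - u*dt/dx
static std::vector<double> Advect1D(const std::vector<double>& phi,
                                    double u, double dt, double dx)
{
    const int n = static_cast<int>(phi.size());
    std::vector<double> result(n);
    for (int i = 0; i < n; ++i)
    {
        const double x = i - u * dt / dx;                       // departure point in grid units
        const double base = std::floor(x);
        const double w = x - base;                               // linear interpolation weight
        const int i0 = (static_cast<int>(base) % n + n) % n;     // periodic wrap
        const int i1 = (i0 + 1) % n;
        result[i] = (1.0 - w) * phi[i0] + w * phi[i1];
    }
    return result;
}

// one BFECC step: estimate, measure the back-and-forth error, compensate, transport
static std::vector<double> BFECCStep1D(const std::vector<double>& phi_old,
                                       double u, double dt, double dx)
{
    const std::vector<double> phi_fwd  = Advect1D(phi_old, u, dt, dx);   // forward estimate
    const std::vector<double> phi_back = Advect1D(phi_fwd, -u, dt, dx);  // bring it back to time n
    std::vector<double> phi_comp(phi_old.size());
    for (std::size_t i = 0; i < phi_old.size(); ++i)
        phi_comp[i] = phi_old[i] + 0.5 * (phi_old[i] - phi_back[i]);     // compensate half the error
    return Advect1D(phi_comp, u, dt, dx);                                // final transport
}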
target_teams_distribute_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized // expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute simd'}} #pragma omp target teams distribute simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp target teams distribute simd'}} #pragma omp target teams distribute simd foo void test_no_clause() { int i; #pragma omp target teams distribute simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp target teams distribute simd' must be a for loop}} #pragma omp target teams distribute simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp target teams distribute simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; // expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}} #pragma omp target teams distribute simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; // expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}} #pragma omp target teams distribute simd; for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}} #pragma omp target teams distribute simd private(x); for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}} #pragma omp target teams distribute simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_collapse() { int i; // expected-error@+1 {{expected '('}} #pragma omp target teams distribute simd collapse for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp target teams distribute simd collapse( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd collapse() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp target teams distribute simd collapse(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp target teams distribute simd collapse(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp target teams distribute simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp target teams distribute simd collapse 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute simd collapse(4 for (i = 0; i < 16; ++i) ; 
// expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}} // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}} #pragma omp target teams distribute simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp target teams distribute simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp target teams distribute simd', but found only 1}} // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp target teams distribute simd collapse(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp target teams distribute simd collapse(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp target teams distribute simd collapse(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp target teams distribute simd collapse(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp target teams distribute simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; // expected-error@+4 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp target teams distribute simd collapse(2) firstprivate(i) // expected-note {{defined as firstprivate}} for (i = 0; i < 16; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp target teams distribute simd' directive may not be firstprivate, predetermined as lastprivate}} for (int j = 0; j < 16; ++j) #pragma omp parallel for reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void 
test_private() { int i; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp target teams distribute simd private( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute simd private(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute simd private(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd private() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd private(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp target teams distribute simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target teams distribute simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp target teams distribute simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target teams distribute simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd lastprivate( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute simd lastprivate(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute simd lastprivate(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd lastprivate() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd lastprivate(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp target teams distribute simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target teams distribute simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp target teams distribute simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target teams distribute simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd firstprivate( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute simd firstprivate(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute simd firstprivate(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd firstprivate() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd firstprivate(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp target teams distribute simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; // expected-error@+1 {{lastprivate variable cannot be firstprivate}} expected-note@+1 {{defined as lastprivate}} #pragma omp target 
teams distribute simd lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{lastprivate variable cannot be firstprivate}} expected-note@+1 2 {{defined as lastprivate}} #pragma omp target teams distribute simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 3 {{lastprivate variable cannot be firstprivate}} expected-note@+1 3 {{defined as lastprivate}} #pragma omp target teams distribute simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp target teams distribute simd simdlen(64) safelen(8) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp target teams distribute simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp target teams distribute simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void test_nontemporal() { int i; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp target teams distribute simd nontemporal( for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp target teams distribute simd nontemporal(, for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 2 {{expected expression}} #pragma omp target teams distribute simd nontemporal(, ) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd nontemporal() for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{expected expression}} #pragma omp target teams distribute simd nontemporal(int) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} omp50-error@+1 {{expected variable name}} #pragma omp target teams distribute simd nontemporal(0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp target teams distribute simd nontemporal(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp target teams distribute simd nontemporal(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // omp45-error@+1 {{unexpected OpenMP clause 
'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp target teams distribute simd nontemporal(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp target teams distribute simd nontemporal(x :) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} #pragma omp target teams distribute simd nontemporal(x :, ) for (i = 0; i < 16; ++i) ; // omp50-note@+2 {{defined as nontemporal}} // omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}} #pragma omp target teams distribute simd nontemporal(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} #pragma omp target teams distribute simd private(x) nontemporal(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} #pragma omp target teams distribute simd nontemporal(x) private(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} #pragma omp target teams distribute simd nontemporal(x, y : 0) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} #pragma omp target teams distribute simd nontemporal(x) lastprivate(x) for (i = 0; i < 16; ++i) ; // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target teams distribute simd'}} #pragma omp target teams distribute simd lastprivate(x) nontemporal(x) for (i = 0; i < 16; ++i) ; #pragma omp target teams distribute simd order // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp target teams distribute simd'}} expected-error {{expected '(' after 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp target teams distribute simd order( // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp target teams distribute simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp target teams distribute simd order(none // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp target teams distribute simd'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{expected 'concurrent' in OpenMP clause 'order'}} for (int i = 0; i < 10; ++i) ; #pragma omp target teams distribute simd order(concurrent // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp target teams distribute simd'}} expected-error {{expected ')'}} expected-note {{to match this 
'('}} for (int i = 0; i < 10; ++i) ; #pragma omp target teams distribute simd order(concurrent) // omp45-error {{unexpected OpenMP clause 'order' in directive '#pragma omp target teams distribute simd'}} for (int i = 0; i < 10; ++i) ; }
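The test file above deliberately exercises only malformed directives. For contrast, the sketch below (not part of the test suite; the function name `example_conforming` is made up) shows a conforming `#pragma omp target teams distribute simd` region that satisfies the `collapse` loop-nest and data-mapping rules those diagnostics guard:

/* Illustrative only: a well-formed 'target teams distribute simd' with a
 * perfectly nested collapse(2) loop nest and explicit map clauses. */
#define N 64

void example_conforming(float a, float x[N][N], float y[N][N]) {
  #pragma omp target teams distribute simd collapse(2) \
          map(to: x[0:N][0:N]) map(tofrom: y[0:N][0:N])
  for (int i = 0; i < N; ++i)
    for (int j = 0; j < N; ++j)
      y[i][j] = a * x[i][j] + y[i][j];   /* saxpy over the collapsed nest */
}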
Mat.h
/* Copyright 2020,2021 LiGuer. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. [Reference] [1]Introduction Algorithms.THOMAS H.CORMEN,CHARLES E.LEISERSON,RONALD L.RIVEST,CLIFFORD STEIN ==============================================================================*/ #ifndef _MAT_H #define _MAT_H #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <initializer_list> #include "omp.h" template<class T = double> class Mat { public: /****************************************************************************** * 核心数据 ******************************************************************************/ T* data = NULL; //数据堆叠方向: 行优先 int rows = 0, cols = 0; /****************************************************************************** * 基础函数 ------------------------------------------------------------------------------- Mat(); //构造/析构函数 Mat(const int _rows, const int _cols); Mat(const int _rows); Mat(const Mat& a); ~Mat(); void error(); //报错 int size(); //Size Mat& fill(T a); //填充 void eatMat(Mat& a); //吃掉另一个矩阵(指针操作) void swap(Mat& a); //交换数据 [ swap ] ******************************************************************************/ /*---------------- 构造/析构函数 ----------------*/ Mat() { ; } Mat(const int _rows, const int _cols) { zero(_rows, _cols); } Mat(const int _rows) { zero(_rows, 1); } Mat(const Mat& a) { *this = a; } Mat(const int _rows, const int _cols, T* _data) { zero(_rows, _cols); set(_data); } ~Mat() { delete data; } /*---------------- 报错 ----------------*/ static void error() { exit(-1); } /*---------------- Size ----------------*/ inline int size() const { return rows * cols; } /*---------------- 填充 ----------------*/ inline Mat& fill(const T& a) { for (int i = 0; i < size(); i++) data[i] = a; return *this; } /*---------------- 吃掉另一个矩阵(指针操作) ----------------*/ inline Mat& eatMat(Mat& a) { if (data != NULL) delete data; data = a.data; a.data = NULL; rows = a.rows; cols = a.cols; a.rows = a.cols = 0; return *this; } /*----------------交换数据 [ swap ]----------------*/ Mat& swap(Mat& a) { T* tmp = a.data; a.data = data; data = tmp; int t = a.rows; a.rows = rows; rows = t; t = a.cols; a.cols = cols; cols = t; return *this; } /****************************************************************************** * 基础矩阵 ------------------------------------------------------------------------------- * [0]alloc 分配空间 [1]zero 零元/清零 [2]E 单位元 [3]ones 全1元 [4]rands 随机元 ******************************************************************************/ /*---------------- 分配空间 ----------------*/ Mat& alloc(const int _rows, const int _cols = 1) { if (_rows != rows || _cols != cols) { if (data != NULL) delete data; data = (T*)malloc(_rows * _cols * sizeof(T)); rows = _rows; cols = _cols; } return *this; } /*---------------- 零元/清零 ----------------*/ inline Mat& zero() { memset(data, 0, sizeof(T) * size()); return *this; } Mat& zero(const int _rows, const int _cols = 1) { alloc(_rows, _cols); zero(); return *this; } Mat& zero(Mat& a) { zero(a.rows, a.cols); return *this; } /*---------------- 单位元 
----------------*/ Mat& E(const int _rows) { zero(_rows, _rows); for (int i = 0; i < rows; i++) (*this)(i, i) = 1; return *this; } Mat& E() { zero(); for (int i = 0; i < rows; i++) (*this)(i, i) = 1; return *this; } /*---------------- 全1元 ----------------*/ Mat& ones(const int _rows, const int _cols = 1) { alloc(_rows, _cols); fill(1); return *this; } /*---------------- 随机元 ----------------*/ Mat& rands(T st, T ed) { //#pragma omp parallel for schedule(dynamic, 1) for (int i = 0; i < size(); i++) data[i] = rand() / double(RAND_MAX) * (ed - st) + st; //[st,ed) return *this; } Mat& rands(const int _rows, const int _cols, T st, T ed) { alloc(_rows, _cols); rands(st, ed); return *this; } /*---------------- 线性间距向量全页 ----------------*/ Mat& linspace(T xs, T xe, int n = 100) { T dx = (xe - xs) / (n - 1); alloc(n); for (int i = 0; i < size(); i++) data[i] = xs + dx * i; data[size() - 1] = xe; return *this; } /****************************************************************************** * 基础运算 ------------------------------------------------------------------------------- T& operator[](int i); // 索引元素 T& operator()(int i, int j); T& operator()(int i); T max(); // max/min T max(int& index); T min(); T min(int& index); bool operator== (const Mat& a); //判断相等 [==] Mat& operator= (const Mat& a); //赋矩阵 [=] //不能赋值自己 Mat& set (T* a); Mat& set (T x, T y); Mat& set (T x, T y, T z); Mat& operator+= (Mat& a); //加法 [add +] Mat& add (Mat& a, Mat& b); Mat& operator-= (Mat& a); //减法 [sub -] Mat& sub (Mat& a, Mat& b); Mat& mul (Mat& a, Mat& b); //乘法 [mul ×] Mat& operator*= (const Mat& a); Mat& operator*= (const double a); //数乘 [mul ×] Mat& mul (const double a, Mat& b); Mat& div (const double a, Mat& b); //数除 [div /] T dot (Mat& a, Mat& b); //点乘 [dot ·] T dot (Mat& a); Mat& cross (Mat& a, Mat& b); //叉乘 [cross ×] Mat& cross_ (Mat& a, Mat& b); Mat& elementMul(Mat& a, Mat& b); //元素乘 [elementMul ×] Mat& elementMul(Mat& a); Mat& elementDiv (Mat& a, Mat& b); //元素除 [elementDiv /] Mat& elementDiv (Mat& a); Mat& negative (Mat& ans); //负 [negative -] Mat& transpose (Mat& ans); //转置 [transpose T] T sum (); //求和 [sum Σ] T sum (Mat& a); Mat& sum (Mat& ans,int dim); T product (); //求积 [product Π] T norm (); //范数 [norm ||x||] Mat& normalize (); //归一化 [normalize] T comi (int i0, int j0); //余子式 [comi] Mat& inv (Mat& ans); //取逆 [inv x~¹] T abs (); //行列式 [abs |x|] Mat& adjugate (Mat& ans); //伴随矩阵 [adjugate A*] void eig (T esp, Mat& eigvec, Mat& eigvalue); //特征值特征向量 [eig] Mat& solveEquations (Mat& b, Mat& x); //解方程组 [solveEquations] void LUPdecomposition (Mat& U, Mat& L, Mat<int>& P); //LUP分解 [LUPdecomposition] Mat& diag (Mat& ans); //构造对角矩阵 [diag] Mat& conv (Mat& a, Mat& b, int padding = 0, int stride = 1); //卷积 [conv] Mat& function (Mat& x, T (*f)(T)) //函数操作 Mat& function (T (*f)(T)) ------------------------------------------------------------------------------- * 运算嵌套注意,Eg: b.add(b.mul(a, b), a.mul(-1, a)); 不管括号第一二项顺序,都是数乘,乘法,加法, 问题原因暂不了解,别用该形式。 * 加减乘,即使是自己也不会影响,效率也不影响 ******************************************************************************/ /*---------------- "[]" "()"取元素 ---------------- * 索引方向: 先纵再横. 
---------------------------------------------*/ T& operator[](int i) { return data[i]; } T& operator()(int x) { return data[x]; } T& operator()(int x, int y) { return data[x * cols + y]; } inline void i2xy(int& i, int& x, int& y) { x = i / cols ; y = i % cols; } inline int i2x (int i) { return i / cols; } inline int i2y (int i) { return i % cols; } inline int xy2i (int x, int y) { return x * cols + y; } /*---------------- max/min ----------------*/ T max() const { T maxdata = *data; for (int i = 1; i < size(); i++) maxdata = maxdata >= data[i] ? maxdata : data[i]; return maxdata; } T max(int& index) { T maxdata = *data; index = 0; for (int i = 1; i < size(); i++) if (maxdata < data[i]) { maxdata = data[i]; index = i; } return maxdata; } T min() const { T mindata = *data; for (int i = 1; i < size(); i++) mindata = mindata <= data[i] ? mindata : data[i]; return mindata; } T min(int& index) { T mindata = *data; index = 0; for (int i = 1; i < size(); i++) if (mindata > data[i]) { mindata = data[i]; index = i; } return mindata; } // 行/列 Mat& max(int index, Mat& ans) { if (index == 0) { ans.alloc(rows); for (int x = 0; x < rows; x++) ans[x] = (*this)(x, 0); for (int x = 0; x < rows; x++) for (int y = 0; y < cols; y++) ans(x) = ans(x) >= (*this)(x, y) ? ans(x) : (*this)(x, y); return ans; } else { ans.alloc(1, cols); for (int y = 0; y < cols; y++) ans[y] = (*this)(0, y); for (int x = 0; x < rows; x++) for (int y = 0; y < cols; y++) ans(y) = ans(y) >= (*this)(x, y) ? ans(y) : (*this)(x, y); return ans; } } Mat& min(int index, Mat& ans) { if (index == 0) { ans.alloc(rows); for (int x = 0; x < rows; x++) ans[x] = (*this)(x, 0); for (int x = 0; x < rows; x++) for (int y = 0; y < cols; y++) ans(x) = ans(x) <= (*this)(x, y) ? ans(x) : (*this)(x, y); return ans; } else { ans.alloc(1, cols); for (int y = 0; y < cols; y++) ans[y] = (*this)(0, y); for (int x = 0; x < rows; x++) for (int y = 0; y < cols; y++) ans(y) = ans(y) <= (*this)(x, y) ? ans(y) : (*this)(x, y); return ans; } } /*----------------判断相等 [ ==/!= ]----------------*/ bool operator==(const Mat& a) { if (rows != a.rows || cols != a.cols)return false; return memcmp(data, a.data, size() * sizeof(T)) == 0 ? 
true : false; } /*----------------赋矩阵 [ = ]----------------*/ //不能赋值自己 Mat& operator=(const Mat& a) { if (a.data == NULL) return *this; alloc(a.rows, a.cols); memcpy(data, a.data, sizeof(T) * size()); return *this; } Mat& operator=(T* a) { memcpy(data, a, sizeof(T) * size()); return *this; } Mat& operator=(T x) { return fill(x); } Mat& operator=(std::initializer_list<T> list) { int i = 0; for (auto& item : list) data[i++] = item; return *this; } Mat& set(T x, T y) { if (size() != 2) error(); data[0] = x; data[1] = y; return *this; } Mat& set(T x, T y, T z) { if (size() != 3) error(); data[0] = x; data[1] = y; data[2] = z; return *this; } Mat& set(const char* fileName) { FILE* fin = fopen(fileName, "r"); for (int i = 0; i < size(); i++) fscanf(fin, "%lf", &data[i]); return *this; } Mat& set_(const int _rows, const int _cols, T* _data) { rows = _rows; cols = _cols; data = _data; return *this; } /*----------------加法 [ add + ]----------------*/ Mat& operator+=(Mat& a) { if (a.rows != rows || a.cols != cols) error(); //#pragma omp parallel for for (int i = 0; i < a.size(); i++) data[i] += a[i]; return *this; } Mat& add(Mat& a, Mat& b) { if (a.rows != b.rows || a.cols != b.cols) error(); alloc(a.rows, a.cols); for (int i = 0; i < a.size(); i++) data[i] = a[i] + b[i]; return *this; } /*----------------减法 [ sub - ]----------------*/ Mat& operator-=(Mat& a) { if (a.rows != rows || a.cols != cols) error(); for (int i = 0; i < a.size(); i++) data[i] -= a[i]; return *this; } Mat& sub(Mat& a, Mat& b) { if (a.rows != b.rows || a.cols != b.cols) error(); alloc(a.rows, a.cols); for (int i = 0; i < a.size(); i++) data[i] = a[i] - b[i]; return *this; } /*----------------乘法 [ mul × ]----------------*/ Mat& mul(Mat& a, Mat& b) { if (a.cols != b.rows) error(); Mat ansTmp(a.rows, b.cols); for (int i = 0; i < a.rows; i++) for (int j = 0; j < b.cols; j++) for (int k = 0; k < a.cols; k++) ansTmp(i, j) += a(i, k) * b(k, j); return eatMat(ansTmp); } Mat& operator*=(Mat& a) { if (cols != a.rows) error(); Mat ansTmp(rows, a.cols); //#pragma omp parallel for schedule(dynamic, 1) for (int i = 0; i < rows; i++) for (int j = 0; j < a.cols; j++) for (int k = 0; k < cols; k++) ansTmp(i, j) += (*this)(i, k) * a(k, j); return eatMat(ansTmp); } /*----------------数乘 [ mul × ]----------------*/ Mat& operator*=(const double a) { for (int i = 0; i < size(); i++) data[i] *= a; return *this; } Mat& mul(const double a, Mat& b) { alloc(b.rows, b.cols); for (int i = 0; i < size(); i++) data[i] = a * b[i]; return *this; } /*----------------数除 [ div / ]----------------*/ Mat& operator/=(const double a) { for (int i = 0; i < size(); i++) data[i] /= a; return *this; } Mat& div(const double a, Mat& b) { alloc(b.rows, b.cols); for (int i = 0; i < size(); i++) data[i] = a / b[i]; return *this; } /*----------------点乘 [ dot · ]---------------- * a·b = Σ ai·bi = aT * b **------------------------------------------------*/ static T dot(Mat& a, Mat& b) { if (a.rows != b.rows || a.cols != b.cols) error(); T ans = a[0] * b[0]; for (int i = 1; i < a.size(); i++) ans += a[i] * b[i]; return ans; } T dot(Mat& a) { if (a.rows != rows && a.cols != cols) error(); T ans = data[0] * a[0]; for (int i = 1; i < size(); i++) ans += data[i] * a[i]; return ans; } /*----------------叉乘 [ cross × ]---------------- //####################### 暂时只三维 * 𝑎 × 𝑏 ⃑ = | 𝑥 𝑦 𝑧 | | 𝑥𝑎 𝑦𝑎 za | | 𝑥𝑏 𝑦𝑏 zb | **------------------------------------------------*/ Mat& cross(Mat& a, Mat& b) { if (a.rows != b.rows)error(); Mat ansTmp(a.rows, a.cols); ansTmp[0] = a[1] * b[2] - a[2] * b[1]; 
ansTmp[1] = a[2] * b[0] - a[0] * b[2]; ansTmp[2] = a[0] * b[1] - a[1] * b[0]; return eatMat(ansTmp); } Mat& cross_(Mat& a, Mat& b) { if (a.rows != b.rows)error(); alloc(a.rows, a.cols); data[0] = a[1] * b[2] - a[2] * b[1]; data[1] = a[2] * b[0] - a[0] * b[2]; data[2] = a[0] * b[1] - a[1] * b[0]; return *this; } /*----------------元素乘 [ elementMul × ]----------------*/ Mat& elementMul(Mat& a, Mat& b) { if (a.rows != b.rows || a.cols != b.cols) error(); alloc(a.rows, a.cols); for (int i = 0; i < size(); i++) data[i] = a[i] * b[i]; return*this; } Mat& elementMul(Mat& a) { if (rows != a.rows || cols != a.cols) error(); //#pragma omp parallel for schedule(dynamic, 1) for (int i = 0; i < size(); i++) data[i] *= a[i]; return *this; } /*----------------元素除 [ elementDiv / ]----------------*/ Mat& elementDiv(Mat& a, Mat& b) { if (a.rows != b.rows || a.cols != b.cols) error(); alloc(a.rows, a.cols); for (int i = 0; i < size(); i++)data[i] = a[i] / b[i]; return *this; } Mat& elementDiv(Mat& a) { if (rows != a.rows || cols != a.cols) error(); for (int i = 0; i < size(); i++)data[i] /= a[i]; return *this; } /*----------------负 [ negative - ]----------------*/ Mat& negative(Mat& ans) { ans.alloc(rows, cols); for (int i = 0; i < size(); i++) ans[i] = -data[i]; return ans; } /*----------------转置 [ transpose T ]----------------*/ Mat& transpose(Mat& ans) { Mat ansTmp(cols, rows); for (int i = 0; i < rows; i++) for (int j = 0; j < cols; j++) ansTmp(j, i) = (*this)(i, j); return ans.eatMat(ansTmp); } /*----------------求和 [ sum Σ ]----------------*/ T sum() { T ans = data[0]; for (int i = 1; i < size(); i++) ans += data[i]; return ans; } static T sum(Mat& a) { T ans = a[0]; for (int i = 1; i < size(); i++) ans += a[i]; return ans; } Mat& sum(Mat& ans,int dim) { if (dim == 0) { //对每一列求和 Mat ansTmp(1, cols); for (int j = 0; j < cols; j++) for (int i = 0; i < rows; i++) ansTmp[j] += (*this)(i, j); return ans.eatMat(ansTmp); } if (dim == 1) { //对每一行求和 Mat ansTmp(rows); for (int i = 0; i < rows; i++) for (int j = 0; j < cols; j++) ansTmp[i] += (*this)(i, j); return ans.eatMat(ansTmp); } error(); return ans; } /*----------------求积 [ product Π ]----------------*/ T product() { T ans = data[0]; for (int i = 1; i < size(); i++) ans *= data[i]; return ans; } /*----------------范数 [ norm ||x|| ]---------------- * ||a|| = sqrt(a·a) **-------------------------------------------*/ T norm() { return sqrt(dot(*this, *this)); } /*----------------归一化 [ normalize ]---------------- * [定义]: 使得|| x || = 1 **------------------------------------------------------*/ Mat& normalize() { T t = norm(); if (t == 0)return *this; for (int i = 0; i < size(); i++) data[i] /= t; return *this; } /*----------------余子式 [ comi ]---------------- * Mij: A 去掉第i行,第j列 **-----------------------------------------------*/ T comi(int i0, int j0) { Mat tmp(rows - 1, cols - 1); for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { if (i == i0 || j == j0) continue; tmp(i < i0 ? i : i - 1, j < j0 ? j : j - 1) = (*this)(i, j); } } return tmp.abs(); } /*----------------取逆 [ inv x~¹ ]---------------- * [定义]: A A~¹ = E * [方法]: 利用不断解线性方程组,对每一列求解. 
**------------------------------------------*/ Mat& inv(Mat& ans) { if (rows != cols)error(); Mat tmp(rows, cols); int n = rows; // LUP分解 Mat L, U; Mat<int> P; LUPdecomposition(U, L, P); //对每一列 Mat b(n), x(n); for (int k = 0; k < n; k++) { b.zero(); b[k] = 1; // 解线性方程组 //solve y for (int i = 0; i < n; i++) { x[i] = b[P[i]]; //yi for (int j = 0; j < i; j++) x[i] -= x[j] * L(i, j); } //solve x for (int i = n - 1; i >= 0; i--) { for (int j = i + 1; j < n; j++) x[i] -= x[j] * U(i, j); x[i] /= U(i, i); } //合并至结果 for (int i = 0; i < rows; i++) tmp(i, k) = x[i]; } return ans.eatMat(tmp); } /*----------------行列式 [ abs |x| ]---------------- * |A| = Σiorj aij·Aij * Aij = (-1)^(i+j)·Mij // Mij余子式 * 1阶: |A| = A00 2阶: |A| = A00·A11 - A01·A10 3阶: |A| = A00·A11·A22 + A01·A12·A20 + A02·A10·A21 - A02·A11·A20 - A00·A12·A21 - A01·A10·A22 **----------------------------------------------*/ T abs() { if (rows != cols)error(); //加速 if (rows == 1)return data[0]; if (rows == 2)return (*this)(0, 0) * (*this)(1, 1) - (*this)(0, 1) * (*this)(0, 1); T ans; memset(&ans, 0, sizeof(T)); if (rows == 3) { T t; for (int i = 0; i < 3; i++) { t = 1; for (int j = 0; j < 3; j++) t *= (*this)(j, (j + i) % 3); ans += t; for (int j = 0; j < 3; j++) t *= (*this)(j, (2 - j + i) % 3); ans -= t; } return ans; } //普适 for (int i = 0; i < rows; i++) ans += (*this)(i, 0) * (i % 2 == 0 ? 1 : -1) * comi(i, 0); return ans; } /*--------------伴随矩阵 [ adjugate A* ]---------------- * [定义]: 伴随矩阵A* 由(i,j)代数余子式Aij构成 [ A00 ... ] A* = | A01 Aij | [ A02 ... ] * [性质]: A* A = |A| **---------------------------------------------*/ Mat& adjugate(Mat& ans) { Mat ansTmp(rows, cols); for (int i = 0; i < rows; i++) for (int j = 0; j < cols; j++) ansTmp(i, j) = ((i + j) % 2 == 0 ? 1 : -1) * comi(i, j); return ans.eatMat(ansTmp); } /*----------------特征值特征向量 [ eig ]---------------- * [定义]: 特征方程: AX = λX * A: 目标矩阵 X: 特征向量 λ: 特征值 * [性质]: * 若 R 为正交矩阵 (R'R = E),有B = R~¹A R , 使得 BY = λY, 特征值不变. * 又有 X = R Y. * [算法]: 雅可比迭代: * * 原理: * 对于目标实矩阵A, 构造正交矩阵序列 R1, R2, ... , Rn, * D0 = A * Dj = RjT Dj-1 Rj * => limj->∞ Dj = D = diag(λ1, λ2, ... , λn) * 当非对角元素接近0时,算法即可停止。 * AR, 右乘只改变 pth col and qth col djp = c ajp - s ajq djq = s ajp + c ajq RA, 左乘只改变 pth row and qth row R'AR: djp = c ajp - s ajq djq = s ajp + c ajq dpp = c² app + s² aqq - 2 c s apq dqq = s² app + c² aqq + 2 c s apq dpq = ( c² - s² ) apq + c s ( app - aqq ) 其他元素对称性可得 * 每一步使得非对角线 dpq dqp 为零 对dpq: (c² - s²)/(cs) = (aqq - app)/apq 令 s = sinΦ c = cosΦ t = tanΦ = s / c θ = cot(2Φ) = (aqq - qpp) / (2 * apq) tan2Φ = (aqq - qpp) / apq = 2 * tanΦ / (1 - tan²Φ) t² + 2tθ - 1 = 0 *------------------------------------------------*/ void eig(T esp, Mat& eigvec, Mat& eigvalue) { if (rows != cols)return; //[1] init int n = rows; eigvalue = (*this); eigvec.E(n); Mat<> R, RT; //[2] begin iteration while (true) { //[3] Calculate row p and col q int p, q; T maxelement = eigvalue[1]; for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { if (i != j && fabs(eigvalue(i, j)) >= maxelement) { maxelement = fabs(eigvalue(i, j)); p = i; q = j; } } }if (maxelement < esp)return; // [2] //[4] eigvalue eigvec T theta = 0.5 * atan2( 2 * eigvalue(p, q), eigvalue(q, q) - eigvalue(p, p) ); T c = cos(theta), s = sin(theta); // c,s R.E(n); R(p, p) = c; R(p, q) = s; // R R(q, p) = -s; R(q, q) = c; R.transpose(RT); eigvalue.mul(RT,eigvalue); // Dj = RjT Dj-1 Rj eigvalue.mul(eigvalue, R); eigvec. 
mul(eigvec, R); // X = R Y } } /*----------------解方程组 [ solveEquations ]---------------- * [定义]: A x = b * ps.直接x = b A~¹ 会存在数值不稳定现象 * [算法]: LUP分解 * [推导] P A = L U L: 单位下三角矩阵 U: 上三角矩阵 P: 置换矩阵 * 置换矩阵:是一个方阵,每行和每列只有一个1,其他均为0,表示矩阵初等行变化 * 置换矩阵是可逆的 * 因为置换矩阵每行只有一个1,可以变为一维数组,每行计入改行1的位置 * P b 是对b的行互换,即.由上条,相当于b[Pi] 可得 L U x = P b 令 y = U x => L y = P b 解得 y 代入 U x = y 解得 x 即. A x = P~¹ L U x = P~¹ L y = P~¹ P b = b * [过程]: [1] LUP分解 [2] LUP-Solve [3] Solve y: for i = 1 to n j=1toi-1 yi = b[Pi] - Σ Lij yj [4] Solve x: for i = n to 1 j=i+1toN x = ( yi - Σ uij xj ) / uii **--------------------------------------------*/ Mat& solveEquations(Mat& b, Mat& x) { int n = rows; x.zero(n); //[1] LUP分解 Mat U, L; Mat<int> P; LUPdecomposition(U, L, P); //[2] LUP - Solve [3] solve y for (int i = 0; i < n; i++) { x[i] = b[P[i]]; //yi for (int j = 0; j < i; j++) x[i] -= x[j] * L(i, j); } //[4] solve x for (int i = n - 1; i >= 0; i--) { for (int j = i + 1; j < n; j++) x[i] -= x[j] * U(i, j); x[i] /= U(i, i); } return x; } /*----------------LUP分解 [ LUPdecomposition ]---------------- * [定义]: P A = L U 其中 L: 单位下三角矩阵 U: 上三角矩阵 P: 置换矩阵 * 因为置换矩阵每行只有一个1,可以变为一维数组,每行计入改行1的位置 * [算法]: 高斯消元法 [1] 从其他方程中减去第1方程的倍数,以把那些方程第1变量消去。 [2] 从第3及以后方程中减去第2方程倍数,以把这些方程的第1,2变量都消去。 [3] 重复过程,直至变为上三角矩阵U,单位下三角L是由消去变量所用行的乘数组成 * 主元pivot: LPU分解中所除元素称为主元,它们处于矩阵U的对角线上。 * 选主元: 采用置换避免除0,避免除数很小(数值会不稳定)的操作 * 把第1行与第k行互换 <=> 置换矩阵Q左乘A--QA * [过程]: [1] 对于每一列 [2] 选主元 [3] 置换行,记录在P中 [4] LU分解: 高斯消元法 [5] A中包含U,L,分离出来即可 **---------------------------------------------*/ void LUPdecomposition(Mat& U, Mat& L, Mat<int>& P) { if (rows != cols)error(); int n = rows; Mat A(*this); P.zero(n); for (int i = 0; i < n; i++) P[i] = i; //[1] for (int k = 0; k < n; k++) { //[2] 选主元 T maxvalue = 0; int kt; for (int i = k; i < n; i++) { if (fabs(A(i, k)) > maxvalue) { maxvalue = fabs(A(i, k)); kt = i; } } if (maxvalue == 0) error(); // singular matrix,秩 rank<n //[3] 置换行 for (int i = 0; i < n; i++) { T t = A(k, i); A(k, i) = A(kt, i); A(kt, i) = t; } int t = P[k]; P[k] = P[kt]; P[kt] = t; //[4] LU分解: 高斯消元法 for (int i = k + 1; i < n; i++) { A(i, k) /= A(k, k); //aik存储消去该行第k位所需的乘数,即L for (int j = k + 1; j < n; j++) A(i, j) -= A(i, k) * A(k, j); //初等行变换,消去该行第k位 } } //[5] A中包含U,L,分离出来即可 U.zero(n, n); L.E(n); for (int i = 0; i < n; i++) for (int j = 0; j < n; j++) if (i > j) L(i, j) = A(i, j); else U(i, j) = A(i, j); } /*----------------构造对角矩阵 [ diag ]----------------*/ Mat& diag(Mat& ans) { Mat ansTmp; if (rows == cols) { ansTmp.alloc(rows); for (int i = 0; i < rows; i++) ansTmp[i] = (*this)(i, i); } else if (rows == 1 || cols == 1) { int n = rows > cols ? rows : cols; ansTmp.alloc(n, n); for (int i = 0; i < n; i++) ansTmp(i, i) = data[i]; } else error(); return ans.eatMat(ansTmp); } /*----------------卷积 [ conv ]----------------*/ Mat& conv(Mat& a, Mat& b, int padding = 0, int stride = 1) { Mat ansTmp( (a.rows - b.rows + 2 * padding) / stride + 1, (a.cols - b.cols + 2 * padding) / stride + 1 ); // for each element of output for (int y = 0; y < ansTmp.cols; y++) { for (int x = 0; x < ansTmp.rows; x++) { // for each element of b for (int ky = 0; ky < b.cols; ky++) { for (int kx = 0; kx < b.rows; kx++) { // get the corresponding element of a int xt = -padding + x * stride + kx, yt = -padding + y * stride + ky; ansTmp(x, y) += (xt < 0 || xt >= a.rows || yt < 0 || yt >= a.cols) ? 
0 : a(xt, yt) * b(kx, ky); } } } } return eatMat(ansTmp); } /*----------------函数操作 [ function ]----------------*/ template<typename F> Mat& function(Mat& x, F&& f) { alloc(x.rows, x.cols); for (int i = 0; i < x.size(); i++) data[i] = f(x[i]); return *this; } template<typename F> Mat& function(F&& f) { for (int i = 0; i < size(); i++) data[i] = f(data[i]); return *this; } template<typename F> Mat& functionIndex(Mat& x, F&& f) { alloc(x.rows, x.cols); for (int i = 0; i < x.size(); i++) data[i] = f(x[i], i); return *this; } template<typename F> Mat& functionIndex(F&& f) { for (int i = 0; i < size(); i++) data[i] = f(data[i], i); return *this; } /****************************************************************************** * 特殊操作 ------------------------------------------------------------------------------- Mat& getCol (int _col, Mat& a) //读/写一列 [getCol/setCol] Mat& setCol (int _col, Mat& a) Mat& getRow (int _row, Mat& a) //读/写一行 [getRow/setRow] Mat& block (int rowSt, int rowEd, int colSt, int colEd, Mat& ans) //子矩阵 [block] Mat& horizStack (Mat& a, Mat& b) //水平向拼接 [horizStack ] ******************************************************************************/ /*----------------读/写一列 [getCol/setCol]----------------*/ Mat& getCol(int _col, Mat& a) { a.alloc(rows); for (int i = 0; i < rows; i++) a[i] = (*this)(i, _col); return a; } Mat& getRow(int _row, Mat& a) { a.alloc(1, cols); for (int i = 0; i < cols; i++) a[i] = (*this)(_row, i); return a; } Mat& setCol(int _col, Mat& a) { for (int i = 0; i < rows; i++) (*this)(i, _col) = a[i]; return *this; } /*----------------子矩阵 [block]----------------*/ Mat& block(int rowSt, int rowEd, int colSt, int colEd, Mat& ans) { Mat ansTmp(rowEd - rowSt + 1, colEd - colSt + 1); for (int i = rowSt; i <= rowEd; i++) for (int j = colSt; j <= colEd; j++) ansTmp(i - rowSt, j - colSt) = (*this)(i, j); return ans.eatMat(ansTmp); } Mat& block_(int rowSt, int rowEd, int colSt, int colEd, Mat& ans) { ans.alloc(rowEd - rowSt + 1, colEd - colSt + 1); for (int i = rowSt; i <= rowEd; i++) for (int j = colSt; j <= colEd; j++) ans(i - rowSt, j - colSt) = (*this)(i, j); return ans; } /*----------------拼接 [rows/colsStack]----------------*/ Mat& rowStack(Mat& a, Mat& b) { if (a.cols != b.cols)error(); Mat ansTmp(a.rows + b.rows, a.cols); for (int i = 0; i < ansTmp.rows; i++) for (int j = 0; j < ansTmp.cols; j++) ansTmp(i, j) = i < a.rows ? a(i, j) : b(i - a.rows, j); return eatMat(ansTmp); } Mat& rowStack(std::initializer_list<Mat&> list) { return *this; } Mat& colStack(Mat& a, Mat& b) { if (a.rows != b.rows)error(); Mat ansTmp(a.rows, a.cols + b.cols); for (int i = 0; i < ansTmp.rows; i++) for (int j = 0; j < ansTmp.cols; j++) ansTmp(i, j) = j < a.cols ? a(i, j) : b(i, j - a.cols); return eatMat(ansTmp); } Mat& colStack(std::initializer_list<Mat&> list) { return *this; } /*----------------复制拓展 [repeatCol]----------------*/ Mat& repeatCol(int repeatNum, Mat& ans) { if (cols != 1)error(); Mat ansTmp(rows, repeatNum); for (int i = 0; i < rows; i++) for (int j = 0; j < repeatNum; j++) ansTmp(i, j) = data[i]; return ans.eatMat(ansTmp); } }; #endif
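A short usage sketch of the Mat<T> interface above (illustrative only, not part of Mat.h; the function name demo_mat_usage is hypothetical): it fills a small system A x = b, solves it through the LUP-based solveEquations path, and cross-checks the result against the explicit inverse. As an aside, note that Mat.h allocates data with malloc in alloc() but releases it with delete in ~Mat() and eatMat(); strictly speaking that mismatch is undefined behaviour, and free() would be the matching call.

#include "Mat.h"
#include <cstdio>

// Illustrative usage of Mat<T>: solve A x = b and verify against the inverse.
int demo_mat_usage() {
    Mat<double> A(3, 3), b(3), x;
    A = { 4, 1, 0,
          1, 3, 1,
          0, 1, 2 };            // row-major fill via the initializer_list assignment
    b.set(1.0, 2.0, 3.0);       // right-hand side

    A.solveEquations(b, x);     // x = inverse(A) * b via the LUP decomposition path

    Mat<double> Ainv, r;
    A.inv(Ainv);                // explicit inverse (column-by-column LUP solves)
    r.mul(Ainv, b);             // should reproduce x

    for (int i = 0; i < 3; i++)
        printf("x[%d] = %g  (check: %g)\n", i, x[i], r[i]);
    return 0;
}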
gs.c
#include <stdio.h> #include <stddef.h> #include <stdlib.h> #include <string.h> #ifdef _OPENACC #include <openacc.h> #endif #ifdef _OPENMP #include <omp.h> #endif #include "c99.h" #include "name.h" #include "fail.h" #include "types.h" #define gs_op gs_op_t /* fix conflict with fortran */ #include "gs_defs.h" #include "gs_local.h" #include "comm.h" #include "mem.h" #include "sort.h" #include "crystal.h" #include "sarray_sort.h" #include "sarray_transfer.h" #define gs PREFIXED_NAME(gs ) #define gs_vec PREFIXED_NAME(gs_vec ) #define gs_irecv PREFIXED_NAME(gs_irecv ) #define gs_isend PREFIXED_NAME(gs_isend ) #define gs_wait PREFIXED_NAME(gs_wait ) #define gs_many PREFIXED_NAME(gs_many ) #define gs_setup PREFIXED_NAME(gs_setup ) #define gs_free PREFIXED_NAME(gs_free ) #define gs_unique PREFIXED_NAME(gs_unique) #define gs_many_isend PREFIXED_NAME(gs_many_isend ) #define gs_many_irecv PREFIXED_NAME(gs_many_irecv ) #define gs_many_wait PREFIXED_NAME(gs_many_wait ) GS_DEFINE_DOM_SIZES() /* Function prototypes */ void gs_flatmap_setup(const uint *map, int **mapf, int *mf_nt, int *m_size); static int map_size(const uint *map, int *t); typedef enum { mode_plain, mode_vec, mode_many, mode_dry_run } gs_mode; static buffer static_buffer = null_buffer; static void gather_noop( void *out, const void *in, const unsigned vn, const uint *map, gs_dom dom, gs_op op, int dstride, int mf_nt, int *mapf, int m_size, int acc) {} static void scatter_noop( void *out, const void *in, const unsigned vn, const uint *map, gs_dom dom, int dstride, int mf_nt, int *mapf, int m_size, int acc) {} static void init_noop( void *out, const unsigned vn, const uint *map, gs_dom dom, gs_op op, int dstride, int mf_nt, int *mapf, int m_size, int acc) {} /*------------------------------------------------------------------------------ Topology Discovery ------------------------------------------------------------------------------*/ struct gs_topology { ulong total_shared; /* number of globally unique shared ids */ struct array nz; /* array of nonzero_id's, grouped by id, sorted by primary index, then flag, then index */ struct array sh; /* array of shared_id's, arbitrary ordering */ struct array pr; /* array of primary_shared_id's */ }; static void gs_topology_free(struct gs_topology *top) { array_free(&top->pr); array_free(&top->sh); array_free(&top->nz); } /************** Local topology **************/ /* nonzero_ids (local part) Creates an array of s_nonzeros, one per nonzero in user id array. The output array is grouped by id. Within each group, non-flagged entries come first; otherwise the entries within the group are sorted by the index into the user id array. The first index in each group is the primary index, and is stored along with each entry. The groups themselves are ordered in increasing order of the primary index associated with the group (as opposed to the user id). 
*/ struct nonzero_id { ulong id; uint i, flag, primary; }; static void nonzero_ids(struct array *nz, const slong *id, const uint n, buffer *buf) { ulong last_id = -(ulong)1; uint i, primary = -(uint)1; struct nonzero_id *row, *end; array_init(struct nonzero_id,nz,n), end=row=nz->ptr; for(i=0;i<n;++i) { slong id_i = id[i], abs_id = iabsl(id_i); if(id_i==0) continue; end->i = i; end->id = abs_id; end->flag = id_i!=abs_id; ++end; } nz->n = end-row; array_resize(struct nonzero_id,nz,nz->n); sarray_sort_2(struct nonzero_id,nz->ptr,nz->n, id,1, flag,0, buf); for(row=nz->ptr,end=row+nz->n;row!=end;++row) { ulong this_id = row->id; if(this_id!=last_id) primary = row->i; row->primary = primary; last_id = this_id; } sarray_sort(struct nonzero_id,nz->ptr,nz->n, primary,0, buf); } /************** Global topology **************/ /* construct list of all unique id's on this proc */ struct unique_id { ulong id; uint work_proc, src_if; }; static void unique_ids(struct array *un, const struct array *nz, const uint np) { struct unique_id *un_row; const struct nonzero_id *nz_row, *nz_end; array_init(struct unique_id,un,nz->n), un_row=un->ptr; for(nz_row=nz->ptr,nz_end=nz_row+nz->n;nz_row!=nz_end;++nz_row) { if(nz_row->i != nz_row->primary) continue; un_row->id = nz_row->id; un_row->work_proc = nz_row->id%np; un_row->src_if = nz_row->flag ? ~nz_row->i : nz_row->i; ++un_row; } un->n = un_row - (struct unique_id*)un->ptr; } /* shared_ids (global part) Creates an array of shared_id's from an array of nonzero_id's. Each entry in the output identifies one id shared with one other processor p. Note: two procs share an id only when at least one of them has it unflagged. The primary index is i locally and ri remotely. Bit 1 of flags indicates the local flag, bit 2 indicates the remote flag. The output has no particular ordering. Also creates an array of primary_shared_id's, one for each shared id. This struct includes ord, a global rank of the id (arbitrary, but unique). 
*/ #define FLAGS_LOCAL 1 #define FLAGS_REMOTE 2 /* i : local primary index p : remote proc ri : remote primary index bi : buffer index (set and used during pw setup) */ struct shared_id { ulong id; uint i, p, ri, bi; unsigned flags; }; struct primary_shared_id { ulong id, ord; uint i; unsigned flag; }; struct shared_id_work { ulong id,ord; uint p1, p2, i1f, i2f; }; static void shared_ids_aux(struct array *sh, struct array *pr, uint pr_n, struct array *wa, buffer *buf) { const struct shared_id_work *w, *we; struct shared_id *s; struct primary_shared_id *p; ulong last_id = -(ulong)1; /* translate work array to output arrays */ sarray_sort(struct shared_id_work,wa->ptr,wa->n, id,1, buf); array_init(struct shared_id,sh,wa->n), sh->n=wa->n, s=sh->ptr; array_init(struct primary_shared_id,pr,pr_n), p=pr->ptr; for(w=wa->ptr,we=w+wa->n;w!=we;++w) { uint i1f = w->i1f, i2f = w->i2f; uint i1 = ~i1f<i1f?~i1f:i1f, i2 = ~i2f<i2f?~i2f:i2f; s->id=w->id, s->i=i1, s->p=w->p2, s->ri=i2; s->flags = ((i2f^i2)&FLAGS_REMOTE) | ((i1f^i1)&FLAGS_LOCAL); ++s; if(w->id!=last_id) { last_id=w->id; p->id=last_id, p->ord=w->ord, p->i=i1, p->flag=(i1f^i1)&FLAGS_LOCAL; ++p; } } pr->n = p-(struct primary_shared_id*)pr->ptr; sarray_sort(struct primary_shared_id,pr->ptr,pr->n, i,0, buf); } static ulong shared_ids(struct array *sh, struct array *pr, const struct array *nz, struct crystal *cr) { struct array un; struct unique_id *un_row, *un_end, *other; ulong last_id = -(ulong)1; ulong ordinal[2], n_shared=0, scan_buf[2]; struct array wa; struct shared_id_work *w; uint n_unique; /* construct list of all unique id's on this proc */ unique_ids(&un,nz,cr->comm.np); n_unique = un.n; /* transfer list to work procs */ sarray_transfer(struct unique_id,&un, work_proc,1, cr); /* group by id, put flagged entries after unflagged (within each group) */ sarray_sort_2(struct unique_id,un.ptr,un.n, id,1, src_if,0, &cr->data); /* count shared id's */ for(un_row=un.ptr,un_end=un_row+un.n;un_row!=un_end;++un_row) { ulong id = un_row->id; if(~un_row->src_if<un_row->src_if) continue; if(id==last_id) continue; other=un_row+1; if(other!=un_end&&other->id==id) last_id=id, ++n_shared; } comm_scan(ordinal, &cr->comm,gs_slong,gs_add, &n_shared,1, scan_buf); /* there are ordinal[1] globally shared unique ids; and ordinal[0] of those are seen by work procs of lower rank; i.e., this work processor sees the range ordinal[0] + (0,n_shared-1) */ /* construct list of shared ids */ last_id = -(ulong)1; array_init(struct shared_id_work,&wa,un.n), wa.n=0, w=wa.ptr; for(un_row=un.ptr,un_end=un_row+un.n;un_row!=un_end;++un_row) { ulong id = un_row->id; uint p1 = un_row->work_proc, i1f = un_row->src_if; if(~i1f<i1f) continue; for(other=un_row+1;other!=un_end&&other->id==id;++other) { uint p2 = other->work_proc, i2f = other->src_if; ulong ord; if(id!=last_id) last_id=id, ++ordinal[0]; ord=ordinal[0]-1; if(wa.n+2>wa.max) array_reserve(struct shared_id_work,&wa,wa.n+2), w=(struct shared_id_work*)wa.ptr+wa.n; w->id=id, w->ord=ord, w->p1=p1, w->p2=p2, w->i1f=i1f, w->i2f=i2f, ++w; w->id=id, w->ord=ord, w->p1=p2, w->p2=p1, w->i1f=i2f, w->i2f=i1f, ++w; wa.n+=2; } } /* transfer shared list to source procs */ sarray_transfer(struct shared_id_work,&wa, p1,0, cr); /* fill output arrays from work array */ shared_ids_aux(sh,pr,n_unique,&wa,&cr->data); array_free(&un); array_free(&wa); return ordinal[1]; } static void get_topology(struct gs_topology *top, const slong *id, uint n, struct crystal *cr) { nonzero_ids(&top->nz,id,n,&cr->data); top->total_shared = 
shared_ids(&top->sh,&top->pr, &top->nz,cr); } static void make_topology_unique(struct gs_topology *top, slong *id, uint pid, buffer *buf) { struct array *const nz=&top->nz, *const sh=&top->sh, *const pr=&top->pr; struct nonzero_id *pnz; struct shared_id *pb, *pe, *e, *out; struct primary_shared_id *q; /* flag local non-primaries */ sarray_sort(struct nonzero_id,nz->ptr,nz->n, i,0, buf); if(id) { struct nonzero_id *p,*e; for(p=nz->ptr,e=p+nz->n;p!=e;++p) if(p->i != p->primary) id[p->i]=-(slong)p->id,p->flag=1; } else { struct nonzero_id *p,*e; for(p=nz->ptr,e=p+nz->n;p!=e;++p) if(p->i != p->primary) p->flag=1; } sarray_sort(struct nonzero_id,nz->ptr,nz->n, primary,0, buf); /* assign owner among shared primaries */ /* create sentinel with i = -1 */ array_reserve(struct shared_id,sh,sh->n+1); ((struct shared_id*)sh->ptr)[sh->n].i = -(uint)1; /* in the sorted list of procs sharing a given id, the owner is chosen to be the j^th unflagged proc, where j = id mod (length of list) */ sarray_sort_2(struct shared_id,sh->ptr,sh->n, i,0, p,0, buf); out=sh->ptr; pnz=top->nz.ptr; for(pb=sh->ptr,e=pb+sh->n;pb!=e;pb=pe) { uint i = pb->i, lt=0,gt=0, owner; struct shared_id *p; while(pnz->i!=i) ++pnz; /* note: current proc not in list */ for(pe=pb; pe->i==i && pe->p<pid; ++pe) if(!(pe->flags&FLAGS_REMOTE)) ++lt; for( ; pe->i==i ; ++pe) if(!(pe->flags&FLAGS_REMOTE)) ++gt; if(!(pb->flags&FLAGS_LOCAL)) { owner = pb->id%(lt+gt+1); if(owner==lt) goto make_sh_unique_mine; if(owner>lt) --owner; } else owner = pb->id%(lt+gt); /* we don't own pb->id */ if(id) id[i] = -(slong)pb->id; pnz->flag=1; /* we only share this id with the owner now; remove the other entries */ for(p=pb; p!=pe; ++p) if(!(p->flags&FLAGS_REMOTE) && !(owner--)) break; if(p!=pe) *out=*p, out->flags=FLAGS_LOCAL, ++out; continue; make_sh_unique_mine: /* we own pb->id */ if(out==pb) { out=pe; for(p=pb; p!=pe; ++p) p->flags=FLAGS_REMOTE; } else for(p=pb; p!=pe; ++p) *out=*p,out->flags=FLAGS_REMOTE,++out; } sh->n = out - ((struct shared_id*)sh->ptr); /* set primary_shared_id flags to match */ ((struct shared_id*)sh->ptr)[sh->n].i = -(uint)1; sarray_sort(struct shared_id,sh->ptr,sh->n, id,1, buf); sarray_sort(struct primary_shared_id,pr->ptr,pr->n, id,1, buf); q=pr->ptr; for(pb=sh->ptr,e=pb+sh->n;pb!=e;pb=pe) { uint i=pb->i; pe=pb; while(pe->i==i) ++pe; if(q->id!=pb->id) printf("FAIL!!!\n"); q->flag=pb->flags&FLAGS_LOCAL; ++q; } } /*------------------------------------------------------------------------------ Local setup ------------------------------------------------------------------------------*/ /* assumes nz is sorted by primary, then flag, then index */ static const uint *local_map(const struct array *nz, const int ignore_flagged, uint *mem_size) { uint *map, *p, count = 1; const struct nonzero_id *row, *other, *end; #define DO_COUNT(cond) do \ for(row=nz->ptr,end=row+nz->n;row!=end;) { \ ulong row_id = row->id; int any=0; \ for(other=row+1;other!=end&&other->id==row_id&&cond;++other) \ any=2, ++count; \ count+=any, row=other; \ } while(0) if(ignore_flagged) DO_COUNT(other->flag==0); else DO_COUNT(1); #undef DO_COUNT p = map = tmalloc(uint,count); *mem_size += count*sizeof(uint); #define DO_SET(cond) do \ for(row=nz->ptr,end=row+nz->n;row!=end;) { \ ulong row_id = row->id; int any=0; \ *p++ = row->i; \ for(other=row+1;other!=end&&other->id==row_id&&cond;++other) \ any=1, *p++ = other->i; \ if(any) *p++ = -(uint)1; else --p; \ row=other; \ } while(0) if(ignore_flagged) DO_SET(other->flag==0); else DO_SET(1); #undef DO_SET *p = -(uint)1; return 
map; } static const uint *flagged_primaries_map(const struct array *nz, uint *mem_size) { uint *map, *p, count=1; const struct nonzero_id *row, *end; for(row=nz->ptr,end=row+nz->n;row!=end;++row) if(row->i==row->primary && row->flag==1) ++count; p = map = tmalloc(uint,count); *mem_size += count*sizeof(uint); for(row=nz->ptr,end=row+nz->n;row!=end;++row) if(row->i==row->primary && row->flag==1) *p++ = row->i; *p = -(uint)1; return map; } /*------------------------------------------------------------------------------ Remote execution and setup ------------------------------------------------------------------------------*/ typedef void exec_fun( void *data, gs_mode mode, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, const void *execdata, const struct comm *comm, char *buf, int dstride, int acc, int bufSize); typedef void fin_fun(void *data); struct gs_remote { uint buffer_size, mem_size; void *data; exec_fun *exec; exec_fun *exec_isend; exec_fun *exec_irecv; exec_fun *exec_wait; fin_fun *fin; }; typedef void setup_fun(struct gs_remote *r, struct gs_topology *top, const struct comm *comm, buffer *buf,int dstride); /*------------------------------------------------------------------------------ Pairwise Execution ------------------------------------------------------------------------------*/ struct pw_comm_data { uint n; /* number of messages */ uint *p; /* message source/dest proc */ uint *size; /* size of message */ uint total; /* sum of message sizes */ }; struct pw_data { struct pw_comm_data comm[2]; const uint *map[2]; int *mapf[2]; int mf_nt[2]; int mf_size[2]; comm_req *req; uint buffer_size; }; static char *pw_exec_recvs(char *buf, const unsigned unit_size, const struct comm *comm, const struct pw_comm_data *c, comm_req *req) { const uint *p, *pe, *size=c->size; #ifdef GPUDIRECT #pragma data present(buf) #endif for(p=c->p,pe=p+c->n;p!=pe;++p) { size_t len = *(size++)*unit_size; #ifdef GPUDIRECT #pragma host_data use_device(buf) #endif comm_irecv(req++,comm,buf,len,*p,*p); buf += len; } return buf; } static char *pw_exec_sends(char *buf, const unsigned unit_size, const struct comm *comm, const struct pw_comm_data *c, comm_req *req) { const uint *p, *pe, *size=c->size; #ifdef GPUDIRECT #pragma data present(buf) #endif for(p=c->p,pe=p+c->n;p!=pe;++p) { size_t len = *(size++)*unit_size; #ifdef GPUDIRECT #pragma host_data use_device(buf) #endif comm_isend(req++,comm,buf,len,*p,comm->id); buf += len; } return buf; } static void pw_exec( void *data, gs_mode mode, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, const void *execdata, const struct comm *comm, char *buf,int dstride,int acc,int bufSize) { const struct pw_data *pwd = execdata; static gs_scatter_fun *const scatter_to_buf[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_many_to_vec, &scatter_noop }; static gs_gather_fun *const gather_from_buf[] = { &gs_gather, &gs_gather_vec, &gs_gather_vec_to_many, &gather_noop }; const unsigned recv = 0^transpose, send = 1^transpose; unsigned unit_size = vn*gs_dom_size[dom]; int i; char *sendbuf; /* post receives */ // printf("r:pwe: %d %lX %lX %d:\n",pwd->comm[recv].n,(pwd->comm[recv].p),(pwd->comm[recv].size),pwd->comm[recv].total); //printf("s:pwe: %d %lX %lX %d:\n",pwd->comm[send].n,(pwd->comm[send].p),(pwd->comm[send].size),pwd->comm[send].total); sendbuf = pw_exec_recvs(buf,unit_size,comm,&pwd->comm[recv],pwd->req); /* fill send buffer */ // printf("mode: %d\n",mode); scatter_to_buf[mode](sendbuf,data,vn,pwd->map[send],dom,dstride,pwd->mf_nt[send], 
pwd->mapf[send],pwd->mf_size[send],acc); double* t = data; #pragma omp target update from(sendbuf[0:unit_size*bufSize/2]) if(acc) #pragma acc update host(sendbuf[0:unit_size*bufSize/2]) if(acc) /* post sends */ pw_exec_sends(sendbuf,unit_size,comm,&pwd->comm[send], &pwd->req[pwd->comm[recv].n]); comm_wait(pwd->req,pwd->comm[0].n+pwd->comm[1].n); #pragma omp target update to(buf[0:unit_size*bufSize/2]) if(acc) #pragma acc update device(buf[0:unit_size*bufSize/2]) if(acc) //#pragma update device(pwd->map[recv],pwd->mapf[recv]) /* gather using recv buffer */ gather_from_buf[mode](data,buf,vn,pwd->map[recv],dom,op,dstride,pwd->mf_nt[recv], pwd->mapf[recv],pwd->mf_size[recv],acc); } /*------------------------------------------------------------------------------ Pairwise Nonblocking ------------------------------------------------------------------------------*/ static void pw_exec_irecv( void *data, gs_mode mode, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, const void *execdata, const struct comm *comm, char *buf,int dstride,int acc,int bufSize) { const struct pw_data *pwd = execdata; static gs_scatter_fun *const scatter_to_buf[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_many_to_vec, &scatter_noop }; static gs_gather_fun *const gather_from_buf[] = { &gs_gather, &gs_gather_vec, &gs_gather_vec_to_many, &gather_noop }; const unsigned recv = 0^transpose, send = 1^transpose; unsigned unit_size = vn*gs_dom_size[dom]; char* sendbuf; int i; /* post receives */ /* printf("r:pwe: %d %lX %lX %d:\n",pwd->comm[recv].n,(pwd->comm[recv].p),(pwd->comm[recv].size),pwd->comm[recv].total); */ /* printf("s:pwe: %d %lX %lX %d:\n",pwd->comm[send].n,(pwd->comm[send].p),(pwd->comm[send].size),pwd->comm[send].total); */ sendbuf = pw_exec_recvs(buf,unit_size,comm,&pwd->comm[recv],pwd->req); } static void pw_exec_isend( void *data, gs_mode mode, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, const void *execdata, const struct comm *comm, char *buf,int dstride,int acc,int bufSize) { const struct pw_data *pwd = execdata; static gs_scatter_fun *const scatter_to_buf[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_many_to_vec, &scatter_noop }; static gs_gather_fun *const gather_from_buf[] = { &gs_gather, &gs_gather_vec, &gs_gather_vec_to_many, &gather_noop }; const unsigned recv = 0^transpose, send = 1^transpose; unsigned unit_size = vn*gs_dom_size[dom]; const uint *p, *pe, *size; int i; char *sendbuf; const struct pw_comm_data *c = &pwd->comm[recv]; size=c->size; sendbuf = buf+unit_size*pwd->comm[recv].total; /* fill send buffer */ scatter_to_buf[mode](sendbuf,data,vn,pwd->map[send],dom,dstride,pwd->mf_nt[send], pwd->mapf[send],pwd->mf_size[send],acc); #pragma omp target update from(sendbuf[0:unit_size*bufSize/2]) if(acc) #pragma acc update host(sendbuf[0:unit_size*bufSize/2]) if(acc) /* post sends */ pw_exec_sends(sendbuf,unit_size,comm,&pwd->comm[send], &pwd->req[pwd->comm[recv].n]); } static void pw_exec_wait( void *data, gs_mode mode, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, const void *execdata, const struct comm *comm, char *buf,int dstride,int acc,int bufSize) { const struct pw_data *pwd = execdata; static gs_scatter_fun *const scatter_to_buf[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_many_to_vec, &scatter_noop }; static gs_gather_fun *const gather_from_buf[] = { &gs_gather, &gs_gather_vec, &gs_gather_vec_to_many, &gather_noop }; const unsigned recv = 0^transpose, send = 1^transpose; unsigned unit_size = vn*gs_dom_size[dom]; int i; 
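  /* completion phase of the split pairwise exchange (pw_exec_irecv/pw_exec_isend
     above): wait on every outstanding request, refresh the device copy of the
     receive buffer when offloading is active (acc != 0), then fold the received
     values into the user data with the mode-specific gather kernel below. */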
comm_wait(pwd->req,pwd->comm[0].n+pwd->comm[1].n); #pragma omp target update to(buf[0:unit_size*bufSize/2]) if(acc) #pragma acc update device(buf[0:unit_size*bufSize/2]) if(acc) //#pragma update device(pwd->map[recv],pwd->mapf[recv]) /* gather using recv buffer */ gather_from_buf[mode](data,buf,vn,pwd->map[recv],dom,op,dstride,pwd->mf_nt[recv], pwd->mapf[recv],pwd->mf_size[recv],acc); } /*------------------------------------------------------------------------------ Pairwise setup ------------------------------------------------------------------------------*/ static uint pw_comm_setup(struct pw_comm_data *data, struct array *sh, const unsigned flags_mask, buffer *buf) { uint n=0,count=0, lp=-(uint)1, mem_size=0; struct shared_id *s, *se; /* sort by remote processor and id (a globally consistent ordering) */ sarray_sort_2(struct shared_id,sh->ptr,sh->n, p,0, id,1, buf); /* assign index into buffer */ for(s=sh->ptr,se=s+sh->n;s!=se;++s) { if(s->flags&flags_mask) { s->bi = -(uint)1; continue; } s->bi = count++; if(s->p!=lp) lp=s->p, ++n; } data->n = n; data->p = tmalloc(uint,2*n); mem_size+=2*n*sizeof(uint); data->size = data->p + n; data->total = count; n = 0, lp=-(uint)1; for(s=sh->ptr,se=s+sh->n;s!=se;++s) { if(s->flags&flags_mask) continue; if(s->p!=lp) { lp=s->p; if(n!=0) data->size[n-1] = count; count=0, data->p[n++]=lp; } ++count; } if(n!=0) data->size[n-1] = count; return mem_size; } static void pw_comm_free(struct pw_comm_data *data) { free(data->p); } /* assumes that the bi field of sh is set */ static const uint *pw_map_setup(struct array *sh, buffer *buf, uint *mem_size) { uint count=0, *map, *p; struct shared_id *s, *se; sarray_sort(struct shared_id,sh->ptr,sh->n, i,0, buf); /* calculate map size */ count=1; for(s=sh->ptr,se=s+sh->n;s!=se;) { uint i=s->i; if(s->bi==-(uint)1) { ++s; continue; } count+=3; for(++s;s!=se&&s->i==i;++s) if(s->bi!=-(uint)1) ++count; } /* write map */ p = map = tmalloc(uint,count); *mem_size += count*sizeof(uint); for(s=sh->ptr,se=s+sh->n;s!=se;) { uint i=s->i; if(s->bi==-(uint)1) { ++s; continue; } *p++ = i, *p++ = s->bi; for(++s;s!=se&&s->i==i;++s) if(s->bi!=-(uint)1) *p++ = s->bi; *p++ = -(uint)1; } *p = -(uint)1; return map; } static struct pw_data *pw_setup_aux(struct array *sh, buffer *buf, uint *mem_size) { struct pw_data *pwd = tmalloc(struct pw_data,1); *mem_size = sizeof(struct pw_data); /* default behavior: receive only remotely unflagged data */ *mem_size+=pw_comm_setup(&pwd->comm[0],sh, FLAGS_REMOTE, buf); pwd->map[0] = pw_map_setup(sh, buf, mem_size); /* Get flattened map */ gs_flatmap_setup(pwd->map[0],&(pwd->mapf[0]),&(pwd->mf_nt[0]),&(pwd->mf_size[0])); /* default behavior: send only locally unflagged data */ *mem_size+=pw_comm_setup(&pwd->comm[1],sh, FLAGS_LOCAL, buf); pwd->map[1] = pw_map_setup(sh, buf, mem_size); /* Get flattened map */ gs_flatmap_setup(pwd->map[1],&(pwd->mapf[1]),&(pwd->mf_nt[1]),&(pwd->mf_size[1])); pwd->req = tmalloc(comm_req,pwd->comm[0].n+pwd->comm[1].n); *mem_size += (pwd->comm[0].n+pwd->comm[1].n)*sizeof(comm_req); pwd->buffer_size = pwd->comm[0].total + pwd->comm[1].total; return pwd; } static void pw_free(struct pw_data *data) { const uint *map0 = data->map[0],*map1 = data->map[1]; pw_comm_free(&data->comm[0]); pw_comm_free(&data->comm[1]); #pragma omp target exit data map(release:map0,map1) #pragma acc exit data delete(map0,map1) free((uint*)data->map[0]); free((uint*)data->map[1]); free(data->req); free(data); } static void pw_setup(struct gs_remote *r, struct gs_topology *top, const struct comm *comm, 
buffer *buf,int dstride) { struct pw_data *pwd = pw_setup_aux(&top->sh,buf, &r->mem_size); r->buffer_size = pwd->buffer_size; r->data = pwd; r->exec = (exec_fun*)&pw_exec; r->exec_irecv = (exec_fun*)&pw_exec_irecv; r->exec_isend = (exec_fun*)&pw_exec_isend; r->exec_wait = (exec_fun*)&pw_exec_wait; r->fin = (fin_fun*)&pw_free; } /*------------------------------------------------------------------------------ Crystal-Router Execution ------------------------------------------------------------------------------*/ struct cr_stage { const uint *scatter_map, *gather_map; int *scatter_mapf, *gather_mapf; int s_nt,g_nt; int s_size,g_size; uint size_r, size_r1, size_r2; uint size_sk, size_s, size_total; uint p1, p2; unsigned nrecvn; }; struct cr_data { struct cr_stage *stage[2]; unsigned nstages; uint buffer_size, stage_buffer_size; comm_req *req; }; static void cr_exec( void *data, gs_mode mode, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, const void *execdata, const struct comm *comm, char *buf,int dstride,int acc,int bufSize) { const struct cr_data *crd = execdata; static gs_scatter_fun *const scatter_user_to_buf[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_many_to_vec, &scatter_noop }; static gs_scatter_fun *const scatter_buf_to_buf[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_vec, &gs_scatter }; static gs_scatter_fun *const scatter_buf_to_user[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_vec_to_many, &scatter_noop }; static gs_gather_fun *const gather_buf_to_user[] = { &gs_gather, &gs_gather_vec, &gs_gather_vec_to_many, &gather_noop }; static gs_gather_fun *const gather_buf_to_buf[] = { &gs_gather, &gs_gather_vec, &gs_gather_vec, &gs_gather }; const unsigned unit_size = vn*gs_dom_size[dom], nstages=crd->nstages; unsigned k; int id; char *sendbuf, *buf_old, *buf_new; const struct cr_stage *stage = crd->stage[transpose]; buf_old = buf; buf_new = buf_old + unit_size*crd->stage_buffer_size; /* crystal router */ for(k=0;k<nstages;++k) { comm_req req[3]; if(stage[k].nrecvn) comm_irecv(&req[1],comm,buf_new,unit_size*stage[k].size_r1, stage[k].p1, comm->np+k); if(stage[k].nrecvn==2) comm_irecv(&req[2],comm,buf_new+unit_size*stage[k].size_r1, unit_size*stage[k].size_r2, stage[k].p2, comm->np+k); sendbuf = buf_new+unit_size*stage[k].size_r; // printf("%d\n",mode); if(k==0) scatter_user_to_buf[mode](sendbuf,data,vn,stage[0].scatter_map,dom,dstride, stage[0].s_nt,stage[0].scatter_mapf,stage[0].s_size,acc); else scatter_buf_to_buf[mode](sendbuf,buf_old,vn,stage[k].scatter_map,dom,dstride, stage[k].s_nt,stage[k].scatter_mapf,stage[k].s_size,acc), gather_buf_to_buf [mode](sendbuf,buf_old,vn,stage[k].gather_map ,dom,op,dstride, stage[k].g_nt,stage[k].gather_mapf,stage[k].g_size,acc); //Need to update gather vec and scatter vec! 
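    /* Stage-k exchange: copy the staging buffer back to the host when running
       on a device, send this stage's block to partner p1, wait for the send and
       the one or two receives posted above, push the received data back to the
       device, then swap the two buffer halves for the next stage. */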
#pragma omp target update from(buf[0:unit_size*bufSize]) if(acc) #pragma acc update host(buf[0:unit_size*bufSize]) if(acc) comm_isend(&req[0],comm,sendbuf,unit_size*stage[k].size_s, stage[k].p1, comm->np+k); comm_wait(&req[0],1+stage[k].nrecvn); #pragma omp target update to(buf[0:unit_size*bufSize]) if(acc) #pragma acc update device(buf[0:unit_size*bufSize]) if(acc) { char *t = buf_old; buf_old=buf_new; buf_new=t; } } scatter_buf_to_user[mode](data,buf_old,vn,stage[k].scatter_map,dom,dstride, stage[k].s_nt,stage[k].scatter_mapf,stage[k].s_size,acc); gather_buf_to_user [mode](data,buf_old,vn,stage[k].gather_map ,dom,op,dstride, stage[k].g_nt,stage[k].gather_mapf,stage[k].g_size,acc); } /*------------------------------------------------------------------------------ Crystal-Router non blocking ------------------------------------------------------------------------------*/ static void cr_exec_irecv( void *data, gs_mode mode, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, const void *execdata, const struct comm *comm, char *buf,int dstride,int acc,int bufSize) { const struct cr_data *crd = execdata; static gs_scatter_fun *const scatter_user_to_buf[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_many_to_vec, &scatter_noop }; static gs_scatter_fun *const scatter_buf_to_buf[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_vec, &gs_scatter }; static gs_scatter_fun *const scatter_buf_to_user[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_vec_to_many, &scatter_noop }; static gs_gather_fun *const gather_buf_to_user[] = { &gs_gather, &gs_gather_vec, &gs_gather_vec_to_many, &gather_noop }; static gs_gather_fun *const gather_buf_to_buf[] = { &gs_gather, &gs_gather_vec, &gs_gather_vec, &gs_gather }; const unsigned unit_size = vn*gs_dom_size[dom], nstages=crd->nstages; unsigned k; int id; char *sendbuf, *buf_old, *buf_new; const struct cr_stage *stage = crd->stage[transpose]; buf_old = buf; buf_new = buf_old + unit_size*crd->stage_buffer_size; /* crystal router */ for(k=0;k<nstages;++k) { comm_req req[2]; if(stage[k].nrecvn) comm_irecv(&req[0],comm,buf_new,unit_size*stage[k].size_r1, stage[k].p1, comm->np+k); if(stage[k].nrecvn==2) comm_irecv(&req[1],comm,buf_new+unit_size*stage[k].size_r1, unit_size*stage[k].size_r2, stage[k].p2, comm->np+k); } } static void cr_exec_isend( void *data, gs_mode mode, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, const void *execdata, const struct comm *comm, char *buf,int dstride,int acc,int bufSize) { const struct cr_data *crd = execdata; static gs_scatter_fun *const scatter_user_to_buf[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_many_to_vec, &scatter_noop }; static gs_scatter_fun *const scatter_buf_to_buf[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_vec, &gs_scatter }; static gs_scatter_fun *const scatter_buf_to_user[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_vec_to_many, &scatter_noop }; static gs_gather_fun *const gather_buf_to_user[] = { &gs_gather, &gs_gather_vec, &gs_gather_vec_to_many, &gather_noop }; static gs_gather_fun *const gather_buf_to_buf[] = { &gs_gather, &gs_gather_vec, &gs_gather_vec, &gs_gather }; const unsigned unit_size = vn*gs_dom_size[dom], nstages=crd->nstages; unsigned k; int id; comm_req req[1]; char *sendbuf, *buf_old, *buf_new; const struct cr_stage *stage = crd->stage[transpose]; buf_old = buf; buf_new = buf_old + unit_size*crd->stage_buffer_size; /* crystal router */ for(k=0;k<nstages;++k) { sendbuf = buf_new+unit_size*stage[k].size_r; // printf("%d\n",mode); if(k==0) 
scatter_user_to_buf[mode](sendbuf,data,vn,stage[0].scatter_map,dom,dstride, stage[0].s_nt,stage[0].scatter_mapf,stage[0].s_size,acc); else scatter_buf_to_buf[mode](sendbuf,buf_old,vn,stage[k].scatter_map,dom,dstride, stage[k].s_nt,stage[k].scatter_mapf,stage[k].s_size,acc), gather_buf_to_buf [mode](sendbuf,buf_old,vn,stage[k].gather_map ,dom,op,dstride, stage[k].g_nt,stage[k].gather_mapf,stage[k].g_size,acc); //Need to update gather vec and scatter vec! #pragma omp target update from(buf[0:unit_size*bufSize]) if(acc) #pragma acc update host(buf[0:unit_size*bufSize]) if(acc) comm_isend(&req[0],comm,sendbuf,unit_size*stage[k].size_s, stage[k].p1, comm->np+k); comm_wait(&req[0],1+stage[k].nrecvn); } scatter_buf_to_user[mode](data,buf_old,vn,stage[k].scatter_map,dom,dstride, stage[k].s_nt,stage[k].scatter_mapf,stage[k].s_size,acc); gather_buf_to_user [mode](data,buf_old,vn,stage[k].gather_map ,dom,op,dstride, stage[k].g_nt,stage[k].gather_mapf,stage[k].g_size,acc); } static void cr_exec_wait( void *data, gs_mode mode, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, const void *execdata, const struct comm *comm, char *buf,int dstride,int acc,int bufSize) { const struct cr_data *crd = execdata; static gs_scatter_fun *const scatter_user_to_buf[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_many_to_vec, &scatter_noop }; static gs_scatter_fun *const scatter_buf_to_buf[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_vec, &gs_scatter }; static gs_scatter_fun *const scatter_buf_to_user[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_vec_to_many, &scatter_noop }; static gs_gather_fun *const gather_buf_to_user[] = { &gs_gather, &gs_gather_vec, &gs_gather_vec_to_many, &gather_noop }; static gs_gather_fun *const gather_buf_to_buf[] = { &gs_gather, &gs_gather_vec, &gs_gather_vec, &gs_gather }; const unsigned unit_size = vn*gs_dom_size[dom], nstages=crd->nstages; unsigned k; int id; char *sendbuf, *buf_old, *buf_new; const struct cr_stage *stage = crd->stage[transpose]; buf_old = buf; buf_new = buf_old + unit_size*crd->stage_buffer_size; for(k=0;k<nstages;++k) { comm_wait(&(crd->req[k]),1+stage[k].nrecvn); #pragma omp target update to(buf[0:unit_size*bufSize]) if(acc) #pragma acc update device(buf[0:unit_size*bufSize]) if(acc) { char *t = buf_old; buf_old=buf_new; buf_new=t; } } scatter_buf_to_user[mode](data,buf_old,vn,stage[k].scatter_map,dom,dstride, stage[k].s_nt,stage[k].scatter_mapf,stage[k].s_size,acc); gather_buf_to_user [mode](data,buf_old,vn,stage[k].gather_map ,dom,op,dstride, stage[k].g_nt,stage[k].gather_mapf,stage[k].g_size,acc); } /*------------------------------------------------------------------------------ Crystal-Router setup ------------------------------------------------------------------------------*/ static uint cr_schedule(struct cr_data *data, const struct comm *comm) { uint mem_size = 0; const uint id = comm->id; uint bl=0, n=comm->np; unsigned k = 0; while(n>1) { uint nl = (n+1)/2, bh = bl+nl; if(id<bh) n=nl; else n-=nl,bl=bh; ++k; } data->nstages = k; data->stage[0] = tmalloc(struct cr_stage,2*(k+1)); data->stage[1] = data->stage[0] + (k+1); data->req = tmalloc(comm_req,k); mem_size += 2*(k+1)*sizeof(struct cr_stage)+k*sizeof(comm_req); bl=0, n=comm->np, k=0; while(n>1) { uint nl = (n+1)/2, bh = bl+nl; uint targ; unsigned recvn; recvn = 1, targ = n-1-(id-bl)+bl; if(id==targ) targ=bh, recvn=0; if(n&1 && id==bh) recvn=2; data->stage[1][k].nrecvn=data->stage[0][k].nrecvn=recvn; data->stage[1][k].p1 =data->stage[0][k].p1 =targ; data->stage[1][k].p2 =data->stage[0][k].p2 
=comm->id-1; if(id<bh) n=nl; else n-=nl,bl=bh; ++k; } return mem_size; } struct crl_id { ulong id; uint p, ri, si, bi, send; }; /* assumes sh is grouped by i (e.g., sorted by i or by id) */ static void crl_work_init(struct array *cw, struct array *sh, const unsigned send_mask, uint this_p) { const unsigned recv_mask = send_mask^(FLAGS_REMOTE|FLAGS_LOCAL); uint last_i=-(uint)1; int added_myself; uint cw_n = 0, cw_max = cw->max; struct crl_id *w = cw->ptr; struct shared_id *s, *se; #define CW_ADD(aid,ap,ari,asi) do { \ if(cw_n==cw_max) \ array_reserve(struct crl_id,cw,cw_n+1),cw_max=cw->max, \ w=(struct crl_id*)cw->ptr+cw_n; \ w->id=aid, w->p=ap, w->ri=ari, w->si=asi; \ ++w, ++cw_n; \ } while(0) for(s=sh->ptr,se=s+sh->n;s!=se;++s) { int send = (s->flags&send_mask)==0; int recv = (s->flags&recv_mask)==0; if(s->i!=last_i) last_i=s->i, added_myself=0; if(!added_myself && recv && (s->flags&FLAGS_LOCAL)==0) { added_myself=1; CW_ADD(s->id,this_p,s->i,s->i); } if(send) CW_ADD(s->id,s->p,s->ri,s->i); } cw->n=cw_n; #undef CW_ADD } static uint crl_maps(struct cr_stage *stage, struct array *cw, buffer *buf) { uint mem_size=0; struct crl_id *w, *we, *other; uint scount=1, gcount=1, *sp, *gp; sarray_sort_2(struct crl_id,cw->ptr,cw->n, bi,0, si,0, buf); for(w=cw->ptr,we=w+cw->n;w!=we;w=other) { uint bi=w->bi,any=0,si=w->si; scount+=3; for(other=w+1;other!=we&&other->bi==bi;++other) if(other->si!=si) si=other->si, any=2, ++gcount; gcount+=any; } stage->scatter_map = sp = tmalloc(uint,scount+gcount); stage->gather_map = gp = sp + scount; mem_size += (scount+gcount)*sizeof(uint); for(w=cw->ptr,we=w+cw->n;w!=we;w=other) { uint bi=w->bi,any=0,si=w->si; *sp++ = w->si, *sp++ = bi; *gp++ = bi; for(other=w+1;other!=we&&other->bi==bi;++other) if(other->si!=si) si=other->si, any=1, *gp++ = si; if(any) *gp++ = -(uint)1; else --gp; *sp++ = -(uint)1; } *sp=-(uint)1, *gp=-(uint)1; return mem_size; } static uint crl_work_label(struct array *cw, struct cr_stage *stage, uint cutoff, int send_hi, buffer *buf, uint *mem_size) { struct crl_id *w, *we, *start; uint nsend, nkeep = 0, nks = 0, bi=0; /* here w->send has a reverse meaning */ if(send_hi) for(w=cw->ptr,we=w+cw->n;w!=we;++w) w->send = w->p< cutoff; else for(w=cw->ptr,we=w+cw->n;w!=we;++w) w->send = w->p>=cutoff; sarray_sort_2(struct crl_id,cw->ptr,cw->n, id,1, send,0, buf); for(start=cw->ptr,w=start,we=w+cw->n;w!=we;++w) { nkeep += w->send; if(w->id!=start->id) start=w; if(w->send!=start->send) w->send=0,w->bi=1, ++nks; else w->bi=0; } nsend = cw->n-nkeep; /* assign indices; sent ids have priority (hence w->send is reversed) */ sarray_sort(struct crl_id,cw->ptr,cw->n, send,0, buf); for(start=cw->ptr,w=start,we=w+nsend+nks;w!=we;++w) { if(w->id!=start->id) start=w, ++bi; if(w->bi!=1) w->send=1; /* switch back to the usual semantics */ w->bi = bi; } stage->size_s = nsend+nks==0 ? 0 : bi+1; for(we=(struct crl_id*)cw->ptr+cw->n;w!=we;++w) { if(w->id!=start->id) start=w, ++bi; w->send = 0; /* switch back to the usual semantics */ w->bi = bi; } stage->size_sk = cw->n==0 ? 
0 : bi+1; *mem_size += crl_maps(stage,cw,buf); return nsend; } static void crl_bi_to_si(struct crl_id *w, uint n, uint v) { for(;n;--n) w->si=w->bi+v, ++w; } static void crl_ri_to_bi(struct crl_id *w, uint n) { for(;n;--n) w->bi=w->ri, ++w; } static uint cr_learn(struct array *cw, struct cr_stage *stage, const struct comm *comm, buffer *buf, uint *mem_size) { comm_req req[3]; const uint id = comm->id; uint bl=0, n=comm->np; uint size_max=0; uint tag = comm->np; while(n>1) { uint nl = (n+1)/2, bh = bl+nl; uint nkeep, nsend[2], nrecv[2][2] = {{0,0},{0,0}}; struct crl_id *wrecv[2], *wsend; nsend[0] = crl_work_label(cw,stage,bh,id<bh,buf, mem_size); nsend[1] = stage->size_s; nkeep = cw->n - nsend[0]; if(stage->nrecvn ) comm_irecv(&req[1],comm,nrecv[0],2*sizeof(uint), stage->p1,tag); if(stage->nrecvn==2) comm_irecv(&req[2],comm,nrecv[1],2*sizeof(uint), stage->p2,tag); comm_isend(&req[0],comm,nsend,2*sizeof(uint),stage->p1,tag); comm_wait(req,1+stage->nrecvn),++tag; stage->size_r1 = nrecv[0][1], stage->size_r2 = nrecv[1][1]; stage->size_r = stage->size_r1 + stage->size_r2; stage->size_total = stage->size_r + stage->size_sk; if(stage->size_total>size_max) size_max=stage->size_total; array_reserve(struct crl_id,cw,cw->n+nrecv[0][0]+nrecv[1][0]); wrecv[0] = cw->ptr, wrecv[0] += cw->n, wrecv[1] = wrecv[0]+nrecv[0][0]; wsend = cw->ptr, wsend += nkeep; if(stage->nrecvn ) comm_irecv(&req[1],comm,wrecv[0],nrecv[0][0]*sizeof(struct crl_id), stage->p1,tag); if(stage->nrecvn==2) comm_irecv(&req[2],comm,wrecv[1],nrecv[1][0]*sizeof(struct crl_id), stage->p2,tag); sarray_sort_2(struct crl_id,cw->ptr,cw->n, send,0, bi,0, buf); comm_isend(&req[0],comm,wsend,nsend[0]*sizeof(struct crl_id),stage->p1,tag); comm_wait(req,1+stage->nrecvn),++tag; crl_bi_to_si(cw->ptr,nkeep,stage->size_r); if(stage->nrecvn) crl_bi_to_si(wrecv[0],nrecv[0][0],0); if(stage->nrecvn==2) crl_bi_to_si(wrecv[1],nrecv[1][0],stage->size_r1); memmove(wsend,wrecv[0],(nrecv[0][0]+nrecv[1][0])*sizeof(struct crl_id)); cw->n += nrecv[0][0] + nrecv[1][0]; cw->n -= nsend[0]; if(id<bh) n=nl; else n-=nl,bl=bh; ++stage; } crl_ri_to_bi(cw->ptr,cw->n); *mem_size += crl_maps(stage,cw,buf); return size_max; } static struct cr_data *cr_setup_aux( struct array *sh, const struct comm *comm, buffer *buf, uint *mem_size) { uint size_max[2],i,k; struct array cw = null_array; struct cr_data *crd = tmalloc(struct cr_data,1); *mem_size = sizeof(struct cr_data); /* default behavior: receive only remotely unflagged data */ /* default behavior: send only locally unflagged data */ *mem_size += cr_schedule(crd,comm); sarray_sort(struct shared_id,sh->ptr,sh->n, i,0, buf); crl_work_init(&cw,sh, FLAGS_LOCAL , comm->id); size_max[0]=cr_learn(&cw,crd->stage[0],comm,buf, mem_size); crl_work_init(&cw,sh, FLAGS_REMOTE, comm->id); size_max[1]=cr_learn(&cw,crd->stage[1],comm,buf, mem_size); crd->stage_buffer_size = size_max[1]>size_max[0]?size_max[1]:size_max[0]; array_free(&cw); crd->buffer_size = 2*crd->stage_buffer_size; /* Get the flat maps for the CR */ const struct cr_stage *stage = crd->stage[0]; for(i=0;i<crd->nstages;i++){ if(i==0){ gs_flatmap_setup(stage[0].scatter_map,(int**)&(stage[0].scatter_mapf),(int*)&(stage[0].s_nt), (int*)&(stage[0].s_size)); //#pragma acc enter data copyin(stage[0].scatter_map[0:stage[0].s_size],stage[0].scatter_mapf[0:stage[0].s_nt]) } else { gs_flatmap_setup(stage[i].scatter_map,(int**)&(stage[i].scatter_mapf),(int*)&(stage[i].s_nt), (int*)&(stage[i].s_size)); 
gs_flatmap_setup(stage[i].gather_map,(int**)&(stage[i].gather_mapf),(int*)&(stage[i].g_nt), (int*)&(stage[i].g_size)); //#pragma acc enter data copyin(stage[i].scatter_map[i:stage[i].s_size],stage[i].scatter_mapf[i:stage[i].s_nt]) //#pragma acc enter data copyin(stage[i].gather_map[i:stage[i].g_size],stage[i].gather_mapf[i:stage[i].g_nt]) } } gs_flatmap_setup(stage[i].scatter_map,(int**)&(stage[i].scatter_mapf),(int*)&(stage[i].s_nt), (int*)&(stage[i].s_size)); gs_flatmap_setup(stage[i].gather_map,(int**)&(stage[i].gather_mapf),(int*)&(stage[i].g_nt), (int*)&(stage[i].g_size)); //#pragma acc enter data copyin(stage[i].scatter_map[i:stage[i].s_size],stage[i].scatter_mapf[i:stage[i].s_nt]) //#pragma acc enter data copyin(stage[i].gather_map[i:stage[i].g_size],stage[i].gather_mapf[i:stage[i].g_nt]) const struct cr_stage *stage2 = crd->stage[1]; for(i=0;i<crd->nstages;i++){ if(i==0){ gs_flatmap_setup(stage2[0].scatter_map,(int**)&(stage2[0].scatter_mapf), (int*)&(stage2[0].s_nt),(int*)&(stage2[0].s_size)); //#pragma acc enter data copyin(stage2[0].scatter_map[0:stage2[0].s_size],stage2[0].scatter_mapf[0:stage2[0].s_nt]) } else { gs_flatmap_setup(stage2[i].scatter_map,(int**)&(stage2[i].scatter_mapf), (int*)&(stage2[i].s_nt),(int*)&(stage2[i].s_size)); gs_flatmap_setup(stage2[i].gather_map,(int**)&(stage2[i].gather_mapf), (int*)&(stage2[i].g_nt),(int*)&(stage2[i].g_size)); //#pragma acc enter data copyin(stage2[i].scatter_map[i:stage2[i].s_size],stage2[i].scatter_mapf[i:stage2[i].s_nt]) //#pragma acc enter data copyin(stage2[i].gather_map[i:stage2[i].g_size],stage2[i].gather_mapf[i:stage2[i].g_nt]) } } gs_flatmap_setup(stage2[i].scatter_map,(int**)&(stage2[i].scatter_mapf), (int*)&(stage2[i].s_nt),(int*)&(stage2[i].s_size)); gs_flatmap_setup(stage2[i].gather_map,(int**)&(stage2[i].gather_mapf), (int*)&(stage2[i].g_nt),(int*)&(stage2[i].g_size)); //#pragma acc enter data copyin(stage2[i].scatter_map[i:stage2[i].s_size],stage2[i].scatter_mapf[i:stage2[i].s_nt]) //#pragma acc enter data copyin(stage2[i].gather_map[i:stage2[i].g_size],stage2[i].gather_mapf[i:stage2[i].g_nt]) return crd; } static void cr_free_stage_maps(struct cr_stage *stage, unsigned kmax) { unsigned int k; const unsigned int *map; int *mapf; for(k=0; k<kmax; ++k) { map = stage->scatter_map; mapf = stage->scatter_mapf; #pragma omp target exit data map(release:map,mapf) #pragma acc exit data delete(map,mapf) if(k!=0) { map = stage->gather_map; mapf = stage->gather_mapf; #pragma omp target exit data map(release:map,mapf) #pragma acc exit data delete(map,mapf) } free((uint*)stage->scatter_map); free((uint*)stage->scatter_mapf); ++stage; } map = stage->scatter_map; mapf = stage->scatter_mapf; #pragma omp target exit data map(release:map,mapf) #pragma acc exit data delete(map,mapf) map = stage->gather_map; mapf = stage->gather_mapf; #pragma omp target exit data map(release:map,mapf) #pragma acc exit data delete(map,mapf) free((uint*)stage->scatter_map); free((uint*)stage->scatter_mapf); } static void cr_free(struct cr_data *data) { cr_free_stage_maps(data->stage[0],data->nstages); cr_free_stage_maps(data->stage[1],data->nstages); free(data->stage[0]); free(data); } static void cr_setup(struct gs_remote *r, struct gs_topology *top, const struct comm *comm, buffer *buf, int dstride) { struct cr_data *crd = cr_setup_aux(&top->sh,comm,buf, &r->mem_size); r->buffer_size = crd->buffer_size; r->data = crd; r->exec = (exec_fun*)&cr_exec; r->exec_isend = (exec_fun*)&cr_exec_isend; r->exec_irecv = (exec_fun*)&cr_exec_irecv; r->exec_wait = 
(exec_fun*)&cr_exec_wait; r->fin = (fin_fun*)&cr_free; } /*------------------------------------------------------------------------------ All-reduce Execution ------------------------------------------------------------------------------*/ struct allreduce_data { const uint *map_to_buf[2], *map_from_buf[2]; int *map_to_buf_f[2],*map_from_buf_f[2]; int mt_nt[2],mf_nt[2]; int mt_size[2],mf_size[2]; uint buffer_size; }; static void allreduce_exec( void *data, gs_mode mode, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, const void *execdata, const struct comm *comm, char *buf, int dstride,int acc, int bufSize) { const struct allreduce_data *ard = execdata; static gs_scatter_fun *const scatter_to_buf[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_many_to_vec, &scatter_noop }; static gs_scatter_fun *const scatter_from_buf[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_vec_to_many, &scatter_noop }; uint gvn = vn*(ard->buffer_size/2); unsigned unit_size = gs_dom_size[dom]; char *ardbuf; int id,i; ardbuf = buf+unit_size*gvn; double *ddata = data; /* user array -> buffer */ gs_init_array(buf,gvn,dom,op,acc); scatter_to_buf[mode](buf,data,vn,ard->map_to_buf[transpose],dom,dstride, ard->mt_nt[transpose],ard->map_to_buf_f[transpose], ard->mt_size[transpose],acc); /* all reduce */ #pragma omp target update from(buf[0:vn*unit_size*bufSize]) if(acc) #pragma acc update host(buf[0:vn*unit_size*bufSize]) if(acc) comm_allreduce(comm,dom,op, buf,gvn, ardbuf); /* buffer -> user array */ #pragma omp target update to(buf[0:vn*unit_size*bufSize]) if(acc) #pragma acc update device(buf[0:vn*unit_size*bufSize]) if(acc) scatter_from_buf[mode](data,buf,vn,ard->map_from_buf[transpose],dom,dstride, ard->mf_nt[transpose],ard->map_from_buf_f[transpose], ard->mf_size[transpose],acc); } /*------------------------------------------------------------------------------ All-reduce setup ------------------------------------------------------------------------------*/ static const uint *allreduce_map_setup( struct array *pr, const unsigned flags_mask, int to_buf, uint *mem_size) { struct primary_shared_id *p, *pe; uint count=1, *map, *m; for(p=pr->ptr,pe=p+pr->n;p!=pe;++p) if((p->flag&flags_mask)==0) count+=3; m=map=tmalloc(uint,count); *mem_size += count*sizeof(uint); if(to_buf) { for(p=pr->ptr,pe=p+pr->n;p!=pe;++p) if((p->flag&flags_mask)==0) *m++ = p->i, *m++ = p->ord, *m++ = -(uint)1; } else { for(p=pr->ptr,pe=p+pr->n;p!=pe;++p) if((p->flag&flags_mask)==0) *m++ = p->ord, *m++ = p->i, *m++ = -(uint)1; } *m=-(uint)1; return map; } static struct allreduce_data *allreduce_setup_aux( struct array *pr, ulong total_shared, uint *mem_size) { struct allreduce_data *ard = tmalloc(struct allreduce_data,1); *mem_size = sizeof(struct allreduce_data); /* default behavior: reduce only unflagged data, copy to all */ ard->map_to_buf [0] = allreduce_map_setup(pr,1,1, mem_size); ard->map_from_buf[0] = allreduce_map_setup(pr,0,0, mem_size); gs_flatmap_setup(ard->map_to_buf[0],&(ard->map_to_buf_f[0]),&(ard->mt_nt[0]),&(ard->mt_size[0])); gs_flatmap_setup(ard->map_from_buf[0],&(ard->map_from_buf_f[0]),&(ard->mf_nt[0]), &(ard->mf_size[0])); /* transpose behavior: reduce all data, copy to unflagged */ ard->map_to_buf [1] = allreduce_map_setup(pr,0,1, mem_size); ard->map_from_buf[1] = allreduce_map_setup(pr,1,0, mem_size); gs_flatmap_setup(ard->map_to_buf[1],&(ard->map_to_buf_f[1]),&(ard->mt_nt[1]),&(ard->mt_size[1])); gs_flatmap_setup(ard->map_from_buf[1],&(ard->map_from_buf_f[1]),&(ard->mf_nt[1]), &(ard->mf_size[1])); ard->buffer_size = 
total_shared*2; return ard; } static void allreduce_free(struct allreduce_data *ard) { //#pragma acc exit data delete(ard->map_to_buf[0],ard->map_to_buf[1],ard->map_from_buf[0],ard->map_from_buf[1]) free((uint*)ard->map_to_buf[0]); free((uint*)ard->map_to_buf[1]); free((uint*)ard->map_from_buf[0]); free((uint*)ard->map_from_buf[1]); free(ard); } static void allreduce_setup(struct gs_remote *r, struct gs_topology *top, const struct comm *comm, buffer *buf,int dstride) { struct allreduce_data *ard = allreduce_setup_aux(&top->pr,top->total_shared, &r->mem_size); r->buffer_size = ard->buffer_size; r->data = ard; r->exec = (exec_fun*)&allreduce_exec; r->fin = (fin_fun*)&allreduce_free; //#pragma acc enter data copyin(ard->map_to_buf[0][0:ard->mt_size[0]],ard->map_from_buf[0][0:ard->mf_size[0]],ard->map_to_buf_f[0][0:ard->mt_nt[0]],ard->map_from_buf_f[0][0:ard->mf_nt[0]],ard->map_to_buf[1][0:ard->mt_size[1]],ard->map_from_buf[1][0:ard->mf_size[1]],ard->map_to_buf_f[1][0:ard->mt_nt[1]],ard->map_from_buf_f[1][0:ard->mf_nt[1]]) } /*------------------------------------------------------------------------------ Automatic Setup --- dynamically picks the fastest method ------------------------------------------------------------------------------*/ static void dry_run_time(double times[3], const struct gs_remote *r, const struct comm *comm, buffer *buf) { int i; double t; buffer_reserve(buf,gs_dom_size[gs_double]*r->buffer_size); for(i= 2;i;--i) r->exec(0,mode_dry_run,1,gs_double,gs_add,0,r->data,comm,buf->ptr,0,0,0); comm_barrier(comm); t = comm_time(); for(i=10;i;--i) r->exec(0,mode_dry_run,1,gs_double,gs_add,0,r->data,comm,buf->ptr,0,0,0); t = (comm_time() - t)/10; times[0] = t/comm->np, times[1] = t, times[2] = t; comm_allreduce(comm,gs_double,gs_add, &times[0],1, &t); comm_allreduce(comm,gs_double,gs_min, &times[1],1, &t); comm_allreduce(comm,gs_double,gs_max, &times[2],1, &t); } static void auto_setup(struct gs_remote *r, struct gs_topology *top, const struct comm *comm, buffer *buf,int dstride) { pw_setup(r, top,comm,buf,dstride); if(comm->np>1) { const char *name = "pairwise"; struct gs_remote r_alt; double time[2][3]; #if 0 //Added to force it to use pw when OpenACC is defined - Matt Otten - 10-28-14 if(comm->id==0) printf(" used all_to_all method ACC: %s\n",name); #else #define DRY_RUN(i,gsr,str) do { \ if(comm->id==0) printf(" " str ": "); \ dry_run_time(time[i],gsr,comm,buf); \ if(comm->id==0) \ printf("%g %g %g\n",time[i][0],time[i][1],time[i][2]); \ } while(0) #define DRY_RUN_CHECK(str,new_name) do { \ DRY_RUN(1,&r_alt,str); \ if(time[1][2]<time[0][2]) \ time[0][2]=time[1][2], name=new_name, \ r->fin(r->data), *r = r_alt; \ else \ r_alt.fin(r_alt.data); \ } while(0) DRY_RUN(0, r, "pairwise times (avg, min, max)"); cr_setup(&r_alt, top,comm,buf,dstride); DRY_RUN_CHECK( "crystal router ", "crystal router"); if(top->total_shared<100000) { allreduce_setup(&r_alt, top,comm,buf,dstride); DRY_RUN_CHECK( "all reduce ", "allreduce"); } #undef DRY_RUN_CHECK #undef DRY_RUN if(comm->id==1) printf(" used all_to_all method: %s\n",name); #endif } } void print_acc(double *a,int n){ int i; #pragma omp target update from(a[0:n]) #pragma acc update host(a[0:n]) for(i=0;i<n;i++){ printf("%f ",a[i]); } printf("\n"); } void print_acc_int(int *a,int n){ int i; #pragma omp target update from(a[0:n]) #pragma acc update host(a[0:n]) for(i=0;i<n;i++){ printf("%d ",a[i]); } printf("\n"); } /*------------------------------------------------------------------------------ Main Execution 
------------------------------------------------------------------------------*/ struct gs_data { struct comm comm; const uint *map_local[2]; /* 0=unflagged, 1=all */ const uint *flagged_primaries; struct gs_remote r; int *map_localf[2]; int *fp_mapf; int m_size[2]; int fp_size; int mf_nt[2]; int fp_m_nt; // nt means number of terminators int dstride; int u_size; uint handle_size; }; static void gs_aux( void *u, gs_mode mode, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, struct gs_data *gsh, buffer *buf) { int acc, i; char *bufPtr; static gs_scatter_fun *const local_scatter[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_many, &scatter_noop }; static gs_gather_fun *const local_gather [] = { &gs_gather, &gs_gather_vec, &gs_gather_many, &gather_noop }; static gs_init_fun *const init[] = { &gs_init, &gs_init_vec, &gs_init_many, &init_noop }; if(!buf) buf = &static_buffer; bufPtr = buf->ptr; #pragma omp target exit data map(release:bufPtr) #pragma acc exit data delete(bufPtr) buffer_reserve(buf,vn*gs_dom_size[dom]*gsh->r.buffer_size); bufPtr = buf->ptr; #pragma omp target enter data map(to:bufPtr[0:vn*gs_dom_size[dom]*gsh->r.buffer_size]) #pragma acc enter data create(bufPtr[0:vn*gs_dom_size[dom]*gsh->r.buffer_size]) acc = 0; #ifdef _OPENACC if(acc_is_present(u,1)) { acc = 1; } #endif // There's a different API for checking the presence of a pointer // on the device. Here, the second argument is the device number. // In OpenACC, the second argument is the size in byte of the data. #ifdef _OPENMP if(omp_target_is_present(u,0)) acc = 1; #endif local_gather [mode](u,u,vn,gsh->map_local[0^transpose],dom,op,gsh->dstride, gsh->mf_nt[0^transpose],gsh->map_localf[0^transpose], gsh->m_size[0^transpose],acc); if(transpose==0) init[mode](u,vn,gsh->flagged_primaries,dom,op,gsh->dstride, gsh->fp_m_nt,gsh->fp_mapf,gsh->fp_size,acc); gsh->r.exec(u,mode,vn,dom,op,transpose,gsh->r.data,&gsh->comm,buf->ptr,gsh->dstride,acc,gsh->r.buffer_size); local_scatter[mode](u,u,vn,gsh->map_local[1^transpose],dom,gsh->dstride, gsh->mf_nt[1^transpose],gsh->map_localf[1^transpose], gsh->m_size[1^transpose],acc); } static void gs_aux_irecv( void *u, gs_mode mode, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, struct gs_data *gsh, buffer *buf) { int acc, i; char *bufPtr; static gs_scatter_fun *const local_scatter[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_many, &scatter_noop }; static gs_gather_fun *const local_gather [] = { &gs_gather, &gs_gather_vec, &gs_gather_many, &gather_noop }; static gs_init_fun *const init[] = { &gs_init, &gs_init_vec, &gs_init_many, &init_noop }; if(!buf) buf = &static_buffer; bufPtr = buf->ptr; #pragma omp target exit data map(release:bufPtr) #pragma acc exit data delete(bufPtr) buffer_reserve(buf,vn*gs_dom_size[dom]*gsh->r.buffer_size); bufPtr = buf->ptr; #pragma omp target enter data map(to:bufPtr[0:vn*gs_dom_size[dom]*gsh->r.buffer_size]) #pragma acc enter data create(bufPtr[0:vn*gs_dom_size[dom]*gsh->r.buffer_size]) acc = 0; #ifdef _OPENACC if(acc_is_present(u,1)) { acc = 1; } #endif // There's a different API for checking the presence of a pointer // on the device. Here, the second argument is the device number. // In OpenACC, the second argument is the size in byte of the data. 
#ifdef _OPENMP if(omp_target_is_present(u,0)) acc = 1; #endif gsh->r.exec_irecv(u,mode,vn,dom,op,transpose,gsh->r.data,&gsh->comm,buf->ptr,gsh->dstride,acc,gsh->r.buffer_size); } static void gs_aux_isend( void *u, gs_mode mode, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, struct gs_data *gsh, buffer *buf) { int acc, i; acc = 0; #ifdef _OPENACC if(acc_is_present(u,1)) { acc = 1; } #endif // There's a different API for checking the presence of a pointer // on the device. Here, the second argument is the device number. // In OpenACC, the second argument is the size in byte of the data. #ifdef _OPENMP if(omp_target_is_present(u,0)) acc = 1; #endif static gs_scatter_fun *const local_scatter[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_many, &scatter_noop }; static gs_gather_fun *const local_gather [] = { &gs_gather, &gs_gather_vec, &gs_gather_many, &gather_noop }; static gs_init_fun *const init[] = { &gs_init, &gs_init_vec, &gs_init_many, &init_noop }; if(!buf) buf = &static_buffer; local_gather [mode](u,u,vn,gsh->map_local[0^transpose],dom,op,gsh->dstride, gsh->mf_nt[0^transpose],gsh->map_localf[0^transpose], gsh->m_size[0^transpose],acc); if(transpose==0) init[mode](u,vn,gsh->flagged_primaries,dom,op,gsh->dstride, gsh->fp_m_nt,gsh->fp_mapf,gsh->fp_size,acc); gsh->r.exec_isend(u,mode,vn,dom,op,transpose,gsh->r.data,&gsh->comm,buf->ptr,gsh->dstride,acc,gsh->r.buffer_size); } static void gs_aux_wait( void *u, gs_mode mode, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, struct gs_data *gsh, buffer *buf) { int acc, i; acc = 0; #ifdef _OPENACC if(acc_is_present(u,1)) { acc = 1; } #endif // There's a different API for checking the presence of a pointer // on the device. Here, the second argument is the device number. // In OpenACC, the second argument is the size in byte of the data. 
#ifdef _OPENMP if(omp_target_is_present(u,0)) acc = 1; #endif static gs_scatter_fun *const local_scatter[] = { &gs_scatter, &gs_scatter_vec, &gs_scatter_many, &scatter_noop }; static gs_gather_fun *const local_gather [] = { &gs_gather, &gs_gather_vec, &gs_gather_many, &gather_noop }; static gs_init_fun *const init[] = { &gs_init, &gs_init_vec, &gs_init_many, &init_noop }; if(!buf) buf = &static_buffer; gsh->r.exec_wait(u,mode,vn,dom,op,transpose,gsh->r.data,&gsh->comm,buf->ptr,gsh->dstride,acc,gsh->r.buffer_size); local_scatter[mode](u,u,vn,gsh->map_local[1^transpose],dom,gsh->dstride, gsh->mf_nt[1^transpose],gsh->map_localf[1^transpose], gsh->m_size[1^transpose],acc); } void gs(void *u, gs_dom dom, gs_op op, unsigned transpose, struct gs_data *gsh, buffer *buf) { gs_aux(u,mode_plain,1,dom,op,transpose,gsh,buf); } /*------------------------------------------------------------ GS nonblocking -------------------------------------------------------------*/ void gs_irecv(void *u, gs_dom dom, gs_op op, unsigned transpose, struct gs_data *gsh, buffer *buf) { gs_aux_irecv(u,mode_plain,1,dom,op,transpose,gsh,buf); } void gs_isend(void *u, gs_dom dom, gs_op op, unsigned transpose, struct gs_data *gsh, buffer *buf) { gs_aux_isend(u,mode_plain,1,dom,op,transpose,gsh,buf); } void gs_wait(void *u, gs_dom dom, gs_op op, unsigned transpose, struct gs_data *gsh, buffer *buf) { gs_aux_wait(u,mode_plain,1,dom,op,transpose,gsh,buf); } void gs_vec(void *u, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, struct gs_data *gsh, buffer *buf) { gs_aux(u,mode_vec,vn,dom,op,transpose,gsh,buf); } void gs_many_isend(void *u, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, struct gs_data *gsh, buffer *buf) { gs_aux_isend((void*)u,mode_many,vn,dom,op,transpose,gsh,buf); } void gs_many_irecv(void *u, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, struct gs_data *gsh, buffer *buf) { gs_aux_irecv((void*)u,mode_many,vn,dom,op,transpose,gsh,buf); } void gs_many_wait(void *u, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, struct gs_data *gsh, buffer *buf) { gs_aux_wait((void*)u,mode_many,vn,dom,op,transpose,gsh,buf); } void gs_many(void *u, unsigned vn, gs_dom dom, gs_op op, unsigned transpose, struct gs_data *gsh, buffer *buf) { gs_aux((void*)u,mode_many,vn,dom,op,transpose,gsh,buf); } /*------------------------------------------------------------------------------ Main Setup ------------------------------------------------------------------------------*/ typedef enum {gs_auto, gs_pairwise, gs_crystal_router, gs_all_reduce} gs_method; static uint local_setup(struct gs_data *gsh, const struct array *nz) { uint mem_size = 0,s=0,i; char hname[1024]; // gethostname(hname, sizeof(hname)); s = 0; gsh->map_local[0] = local_map(nz,1, &s); gs_flatmap_setup(gsh->map_local[0],&(gsh->map_localf[0]),&(gsh->mf_nt[0]),&(gsh->m_size[0])); mem_size += s; //fprintf(stderr,"%s: map[0:%d] -> %lX : %lX\n",hname,s/4,gsh->map_local[0],((void*)gsh->map_local[0])+s); s = 0; gsh->map_local[1] = local_map(nz,0, &s); gs_flatmap_setup(gsh->map_local[1],&(gsh->map_localf[1]),&(gsh->mf_nt[1]),&(gsh->m_size[1])); mem_size += s; //fprintf(stderr,"%s: t_map[0:%d] -> %lX : %lX\n",hname,s/4,gsh->map_local[1],((void*)gsh->map_local[1])+s); s = 0; gsh->flagged_primaries = flagged_primaries_map(nz, &s); gs_flatmap_setup(gsh->flagged_primaries,&(gsh->fp_mapf),&(gsh->fp_m_nt),&(gsh->fp_size)); mem_size += s; //fprintf(stderr,"%s: fp_map[0:%d] -> %lX : %lX\n",hname,s/4,gsh->flagged_primaries,((void*)gsh->flagged_primaries)+s); return 
mem_size; } static void gs_setup_aux(struct gs_data *gsh, const slong *id, uint n, int unique, gs_method method, int verbose) { static setup_fun *const remote_setup[] = { &auto_setup, &pw_setup, &cr_setup, &allreduce_setup }; struct gs_topology top; struct crystal cr; crystal_init(&cr,&gsh->comm); get_topology(&top, id,n, &cr); if(unique) make_topology_unique(&top,0,gsh->comm.id,&cr.data); gsh->handle_size = sizeof(struct gs_data); gsh->handle_size += local_setup(gsh,&top.nz); gsh->dstride = (int)n; if(verbose && gsh->comm.id==0) printf("gs_setup: %ld unique labels shared\n",(long)top.total_shared); remote_setup[method](&gsh->r, &top,&gsh->comm,&cr.data,gsh->dstride); gsh->handle_size += gsh->r.mem_size; if(verbose) { /* report memory usage */ double avg[2],td[2]; uint min[2],max[2],ti[2]; avg[0] = min[0] = max[0] = gsh->handle_size; avg[1] = min[1] = max[1] = sizeof(double)*gsh->r.buffer_size; avg[0] /= gsh->comm.np; avg[1] /= gsh->comm.np; comm_allreduce(&gsh->comm,gs_double,gs_add, avg,2, td); comm_allreduce(&gsh->comm,gs_sint,gs_min, min,2, ti); comm_allreduce(&gsh->comm,gs_sint,gs_max, max,2, ti); if(gsh->comm.id==0) { printf(" " "handle bytes (avg, min, max)" ": " "%g %u %u\n", avg[0], (unsigned)min[0], (unsigned)max[0]); printf(" " "buffer bytes (avg, min, max)" ": " "%g %u %u\n", avg[1], (unsigned)min[1], (unsigned)max[1]); } } gs_topology_free(&top); crystal_free(&cr); } struct gs_data *gs_setup(const slong *id, uint n, const struct comm *comm, int unique, gs_method method, int verbose) { struct gs_data *gsh = tmalloc(struct gs_data,1); comm_dup(&gsh->comm,comm); gs_setup_aux(gsh,id,n,unique,method,verbose); return gsh; } void gs_free(struct gs_data *gsh) { comm_free(&gsh->comm); const uint *map_local0 = gsh->map_local[0],*map_local1 = gsh->map_local[1],*flagged_primaries = gsh->flagged_primaries; #pragma omp target exit data map(release:map_local0,map_local1,flagged_primaries) #pragma acc exit data delete(map_local0,map_local1,flagged_primaries) free((uint*)gsh->map_local[0]), free((uint*)gsh->map_local[1]); free((uint*)gsh->flagged_primaries); gsh->r.fin(gsh->r.data); free(gsh); } void gs_unique(slong *id, uint n, const struct comm *comm) { struct gs_topology top; struct crystal cr; crystal_init(&cr,comm); get_topology(&top, id,n, &cr); make_topology_unique(&top,id,comm->id,&cr.data); gs_topology_free(&top); crystal_free(&cr); } void gs_flatmap_setup(const uint *map, int **mapf, int *mf_nt, int *m_size) { uint i,j,k; int mf_temp; *m_size = map_size(map,&mf_temp); *mf_nt = mf_temp; *mapf = (int*)malloc(mf_temp*2*sizeof(int)); for(i=0,k=0;map[i]!=-1;i=j+1,k++){ // Record i *(*mapf+k*2) = i; for(j=i+1;map[j]!=-1;j++); // Record j-i *(*mapf+k*2+1) = j-i-1; } int *mapf2 = *mapf; #pragma omp target enter data map(to:map[0:*m_size],mapf2[0:2*mf_temp]) #pragma acc enter data pcopyin(map[0:*m_size],mapf2[0:2*mf_temp]) return; } static int map_size(const uint *map, int *t) { int i,ct=0; *t = 0; // No map if(!map) { return 0; } // "Empty" map (contains only a single -1 terminator) if(map[0] == -1) { return 1; } // "Regular" map (contains two -1's as termination) for(i=ct=0;ct<2;i++){ if(map[i]==-1){ ct++; (*t)++; } else { ct=0; } } (*t)--; return i; } /*------------------------------------------------------------------------------ FORTRAN interface ------------------------------------------------------------------------------*/ #undef gs_op #undef gs_irecv #undef gs_isend #undef gs_wait #undef gs_free #undef gs_setup #undef gs_many #undef gs_vec #undef gs #undef gs_many_isend #undef 
gs_many_irecv #undef gs_many_wait #define cgs PREFIXED_NAME(gs ) #define cgs_irecv PREFIXED_NAME(gs_irecv) #define cgs_isend PREFIXED_NAME(gs_isend) #define cgs_wait PREFIXED_NAME(gs_wait ) #define cgs_vec PREFIXED_NAME(gs_vec ) #define cgs_many PREFIXED_NAME(gs_many ) #define cgs_many_isend PREFIXED_NAME(gs_many_isend ) #define cgs_many_irecv PREFIXED_NAME(gs_many_irecv ) #define cgs_many_wait PREFIXED_NAME(gs_many_wait ) #define cgs_setup PREFIXED_NAME(gs_setup) #define cgs_free PREFIXED_NAME(gs_free ) #define fgs_setup_pick FORTRAN_NAME(gs_setup_pick,GS_SETUP_PICK) #define fgs_setup FORTRAN_NAME(gs_setup ,GS_SETUP ) #define fgs FORTRAN_NAME(gs_op ,GS_OP ) #define fgs_irecv FORTRAN_NAME(gs_op_irecv ,GS_OP_IRECV ) #define fgs_isend FORTRAN_NAME(gs_op_isend ,GS_OP_ISEND ) #define fgs_wait FORTRAN_NAME(gs_op_wait ,GS_OP_WAIT ) #define fgs_vec FORTRAN_NAME(gs_op_vec ,GS_OP_VEC ) #define fgs_many FORTRAN_NAME(gs_op_many ,GS_OP_MANY ) #define fgs_many_irecv FORTRAN_NAME(gs_op_many_irecv ,GS_OP_MANY_IRECV ) #define fgs_many_isend FORTRAN_NAME(gs_op_many_isend ,GS_OP_MANY_ISEND ) #define fgs_many_wait FORTRAN_NAME(gs_op_many_wait ,GS_OP_MANY_WAIT ) #define fgs_fields FORTRAN_NAME(gs_op_fields ,GS_OP_FIELDS ) #define fgs_fields_isend FORTRAN_NAME(gs_op_fields_isend, GS_OP_FIELDS_ISEND) #define fgs_fields_irecv FORTRAN_NAME(gs_op_fields_irecv, GS_OP_FIELDS_IRECV) #define fgs_fields_wait FORTRAN_NAME(gs_op_fields_wait, GS_OP_FIELDS_WAIT) #define fgs_free FORTRAN_NAME(gs_free ,GS_FREE ) static struct gs_data **fgs_info = 0; static int fgs_max = 0; static int fgs_n = 0; void fgs_setup_pick(sint *handle, const slong id[], const sint *n, const MPI_Fint *comm, const sint *np, const sint *method) { struct gs_data *gsh; if(fgs_n==fgs_max) fgs_max+=fgs_max/2+1, fgs_info=trealloc(struct gs_data*,fgs_info,fgs_max); gsh=fgs_info[fgs_n]=tmalloc(struct gs_data,1); comm_init_check(&gsh->comm,*comm,*np); #if defined(_OPENMP) || defined(_OPENACC) #ifdef GPUDIRECT if(gsh->comm.id==0) printf(" USE_GPU_DIRECT=1 \n"); #else if(gsh->comm.id==0) printf(" USE_GPU_DIRECT=0 \n"); #endif #endif gs_setup_aux(gsh,id,*n,0,*method,1); *handle = fgs_n++; } void fgs_setup(sint *handle, const slong id[], const sint *n, const MPI_Fint *comm, const sint *np) { #if defined(_OPENMP) || defined(_OPENACC) const sint method = gs_pairwise; #else const sint method = gs_auto; #endif fgs_setup_pick(handle,id,n,comm,np,&method); } static void fgs_check_handle(sint handle, const char *func, unsigned line) { if(handle<0 || handle>=fgs_n || !fgs_info[handle]) fail(1,__FILE__,line,"%s: invalid handle", func); } static const gs_dom fgs_dom[4] = { 0, gs_double, gs_sint, gs_slong }; static void fgs_check_parms(sint handle, sint dom, sint op, const char *func, unsigned line) { if(dom<1 || dom>3) fail(1,__FILE__,line,"%s: datatype %d not in valid range 1-3",func,dom); if(op <1 || op >4) fail(1,__FILE__,line,"%s: op %d not in valid range 1-4",func,op); fgs_check_handle(handle,func,line); } void fgs_vec(const sint *handle, void *u, const sint *n, const sint *dom, const sint *op, const sint *transpose) { fgs_check_parms(*handle,*dom,*op,"gs_op_vec",__LINE__); cgs_vec(u,*n,fgs_dom[*dom],(gs_op_t)(*op-1),*transpose!=0, fgs_info[*handle],NULL); } void fgs_many(const sint *handle, void *u1, void *u2, void *u3, void *u4, void *u5, void *u6, const sint *n, const sint *dom, const sint *op, const sint *transpose) { int i; void *uu[6]; uu[0]=u1,uu[1]=u2,uu[2]=u3,uu[3]=u4,uu[4]=u5,uu[5]=u6; fgs_check_parms(*handle,*dom,*op,"gs_op_many",__LINE__); #ifdef 
NEW_GS_LOOPS // Temporary patch for fgs_many - cgs_many has memory errors with the new // format for(i=0;i<*n;i++) { cgs(uu[i],fgs_dom[*dom],(gs_op_t)(*op-1),*transpose!=0,fgs_info[*handle],NULL); } #else cgs_many(uu,*n,fgs_dom[*dom],(gs_op_t)(*op-1),*transpose!=0, fgs_info[*handle],NULL); #endif } void fgs_many_irecv(const sint *handle, void *u1, void *u2, void *u3, void *u4, void *u5, void *u6, const sint *n, const sint *dom, const sint *op, const sint *transpose) { int i; void *uu[6]; uu[0]=u1,uu[1]=u2,uu[2]=u3,uu[3]=u4,uu[4]=u5,uu[5]=u6; fgs_check_parms(*handle,*dom,*op,"gs_op_many",__LINE__); #ifdef NEW_GS_LOOPS // Temporary patch for fgs_many - cgs_many has memory errors with the new // format for(i=0;i<*n;i++) { cgs_irecv(uu[i],fgs_dom[*dom],(gs_op_t)(*op-1),*transpose!=0,fgs_info[*handle],NULL); } #else cgs_many_irecv(uu,*n,fgs_dom[*dom],(gs_op_t)(*op-1),*transpose!=0, fgs_info[*handle],NULL); #endif } void fgs_many_isend(const sint *handle, void *u1, void *u2, void *u3, void *u4, void *u5, void *u6, const sint *n, const sint *dom, const sint *op, const sint *transpose) { int i; void *uu[6]; uu[0]=u1,uu[1]=u2,uu[2]=u3,uu[3]=u4,uu[4]=u5,uu[5]=u6; fgs_check_parms(*handle,*dom,*op,"gs_op_many",__LINE__); #ifdef NEW_GS_LOOPS // Temporary patch for fgs_many - cgs_many has memory errors with the new // format for(i=0;i<*n;i++) { cgs_isend(uu[i],fgs_dom[*dom],(gs_op_t)(*op-1),*transpose!=0,fgs_info[*handle],NULL); } #else cgs_many_isend(uu,*n,fgs_dom[*dom],(gs_op_t)(*op-1),*transpose!=0, fgs_info[*handle],NULL); #endif } void fgs_many_wait(const sint *handle, void *u1, void *u2, void *u3, void *u4, void *u5, void *u6, const sint *n, const sint *dom, const sint *op, const sint *transpose) { int i; void *uu[6]; uu[0]=u1,uu[1]=u2,uu[2]=u3,uu[3]=u4,uu[4]=u5,uu[5]=u6; fgs_check_parms(*handle,*dom,*op,"gs_op_many",__LINE__); #ifdef NEW_GS_LOOPS // Temporary patch for fgs_many - cgs_many has memory errors with the new // format for(i=0;i<*n;i++) { cgs_wait(uu[i],fgs_dom[*dom],(gs_op_t)(*op-1),*transpose!=0,fgs_info[*handle],NULL); } #else cgs_many_wait(uu,*n,fgs_dom[*dom],(gs_op_t)(*op-1),*transpose!=0, fgs_info[*handle],NULL); #endif } static struct array fgs_fields_array = null_array; void fgs(const sint *handle, void *u, const sint *dom, const sint *op, const sint *transpose) { fgs_check_parms(*handle,*dom,*op,"gs_op",__LINE__); cgs(u,fgs_dom[*dom],(gs_op_t)(*op-1),*transpose!=0,fgs_info[*handle],NULL); } void fgs_isend(const sint *handle, void *u, const sint *dom, const sint *op, const sint *transpose) { fgs_check_parms(*handle,*dom,*op,"gs_op",__LINE__); cgs_isend(u,fgs_dom[*dom],(gs_op_t)(*op-1),*transpose!=0,fgs_info[*handle],NULL); } void fgs_irecv(const sint *handle, void *u, const sint *dom, const sint *op, const sint *transpose) { fgs_check_parms(*handle,*dom,*op,"gs_op",__LINE__); cgs_irecv(u,fgs_dom[*dom],(gs_op_t)(*op-1),*transpose!=0,fgs_info[*handle],NULL); } void fgs_wait(const sint *handle, void *u, const sint *dom, const sint *op, const sint *transpose) { fgs_check_parms(*handle,*dom,*op,"gs_op",__LINE__); cgs_wait(u,fgs_dom[*dom],(gs_op_t)(*op-1),*transpose!=0,fgs_info[*handle],NULL); } void fgs_fields_isend(const sint *handle, void *u, const sint *stride, const sint *n, const sint *dom, const sint *op, const sint *transpose) { size_t offset; uint i; void **p; fgs_check_parms(*handle,*dom,*op,"gs_op_fields",__LINE__); if(*n<0) return; #if defined NEW_GS_LOOPS || defined _OPENMP || defined _OPENACC cgs_many_isend(u,*n, fgs_dom[*dom],(gs_op_t)(*op-1), *transpose!=0, 
fgs_info[*handle],NULL); #else array_reserve(void*,&fgs_fields_array,*n); p = fgs_fields_array.ptr; offset = *stride * gs_dom_size[*dom-1]; for(i=*n;i;--i) *p++ = u, u = (char*)u + offset; cgs_many_isend((void *)fgs_fields_array.ptr,*n, (gs_dom)(*dom-1),(gs_op_t)(*op-1), *transpose!=0, fgs_info[*handle],NULL); #endif } void fgs_fields_irecv(const sint *handle, void *u, const sint *stride, const sint *n, const sint *dom, const sint *op, const sint *transpose) { size_t offset; uint i; void **p; #if defined NEW_GS_LOOPS || defined _OPENMP || defined _OPENACC cgs_many_irecv(u,*n, fgs_dom[*dom],(gs_op_t)(*op-1), *transpose!=0, fgs_info[*handle],NULL); #else array_reserve(void*,&fgs_fields_array,*n); p = fgs_fields_array.ptr; offset = *stride * gs_dom_size[*dom-1]; for(i=*n;i;--i) *p++ = u, u = (char*)u + offset; cgs_many_irecv((void *)fgs_fields_array.ptr,*n, (gs_dom)(*dom-1),(gs_op_t)(*op-1), *transpose!=0, fgs_info[*handle],NULL); #endif } void fgs_fields_wait(const sint *handle, void *u, const sint *stride, const sint *n, const sint *dom, const sint *op, const sint *transpose) { size_t offset; uint i; void **p; fgs_check_parms(*handle,*dom,*op,"gs_op_fields",__LINE__); if(*n<0) return; #if defined NEW_GS_LOOPS || defined _OPENMP || defined _OPENACC cgs_many_wait(u,*n, fgs_dom[*dom],(gs_op_t)(*op-1), *transpose!=0, fgs_info[*handle],NULL); #else array_reserve(void*,&fgs_fields_array,*n); p = fgs_fields_array.ptr; offset = *stride * gs_dom_size[*dom-1]; for(i=*n;i;--i) *p++ = u, u = (char*)u + offset; cgs_many_wait((void *const*)fgs_fields_array.ptr,*n, (gs_dom)(*dom-1),(gs_op_t)(*op-1), *transpose!=0, fgs_info[*handle],NULL); #endif } void fgs_fields(const sint *handle, void *u, const sint *stride, const sint *n, const sint *dom, const sint *op, const sint *transpose) { size_t offset; uint i; void **p; fgs_check_parms(*handle,*dom,*op,"gs_op_fields",__LINE__); if(*n<0) return; #if defined NEW_GS_LOOPS || defined _OPENMP || defined _OPENACC cgs_many(u,*n, fgs_dom[*dom],(gs_op_t)(*op-1), *transpose!=0, fgs_info[*handle],NULL); #else array_reserve(void*,&fgs_fields_array,*n); p = fgs_fields_array.ptr; offset = *stride * gs_dom_size[*dom-1]; for(i=*n;i;--i) *p++ = u, u = (char*)u + offset; cgs_many((void *)fgs_fields_array.ptr,*n, (gs_dom)(*dom-1),(gs_op_t)(*op-1), *transpose!=0, fgs_info[*handle],NULL); #endif } void fgs_free(const sint *handle) { fgs_check_handle(*handle,"gs_free",__LINE__); cgs_free(fgs_info[*handle]); fgs_info[*handle] = 0; }
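A minimal usage sketch of the blocking C interface defined above (gs_setup, gs, gs_free). The global labels and values below are hypothetical, and it is assumed that the gs prototypes and an initialized struct comm are in scope; gs_auto lets dry_run_time pick the fastest of the pairwise, crystal-router, and all-reduce exchanges.

/* Sketch: sum-reduce entries of u that share a global label across ranks.
   Ids and values are hypothetical; comm is an already-initialized struct comm. */
void example_gs_sum(const struct comm *comm)
{
  slong id[4] = {10, 11, 12, 13};       /* hypothetical global labels */
  double u[4] = {1.0, 2.0, 3.0, 4.0};   /* local values to be combined */

  struct gs_data *gsh = gs_setup(id, 4, comm, /*unique=*/0,
                                 gs_auto, /*verbose=*/0);
  gs(u, gs_double, gs_add, /*transpose=*/0, gsh, NULL);  /* blocking exchange */
  gs_free(gsh);
}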
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 24; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + 
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
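The unrolled update in the time loop above applies the symmetric weights coef0..coef4 at offsets 0..4 along each axis. Below is a compact sketch of the same per-point update written with a coefficient table; it is arithmetically equivalent and shown only to make the stencil weights explicit, while the benchmark keeps the unrolled form.

/* Sketch: the same 25-point update with a coefficient table.
   Equivalent to the unrolled expression in the loop nest above. */
static void update_point(double ****A, double ***roc2, int t, int i, int j, int k)
{
  static const double c[5] = {-0.28472, 0.16000, -0.02000, 0.00254, -0.00018};
  double lap = c[0] * A[t%2][i][j][k];
  int r;
  for (r = 1; r <= 4; r++) {
    lap += c[r] * (A[t%2][i-r][j][k] + A[t%2][i+r][j][k]
                 + A[t%2][i][j-r][k] + A[t%2][i][j+r][k]
                 + A[t%2][i][j][k-r] + A[t%2][i][j][k+r]);
  }
  A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k]
                      + roc2[i][j][k]*lap;
}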
nmt_covar.c
#include "config.h" #include "utils.h" nmt_covar_workspace *nmt_covar_workspace_init(nmt_field *fla1,nmt_field *fla2, nmt_field *flb1,nmt_field *flb2, int lmax,int niter, int l_toeplitz,int l_exact,int dl_band) { if(!(nmt_diff_curvedsky_info(fla1->cs,fla2->cs)) || !(nmt_diff_curvedsky_info(fla1->cs,flb1->cs)) || !(nmt_diff_curvedsky_info(fla1->cs,flb2->cs))) report_error(NMT_ERROR_COVAR,"Can't compute covariance for fields with different resolutions\n"); nmt_covar_workspace *cw=my_malloc(sizeof(nmt_covar_workspace)); int ii; int npix=fla1->cs->npix; flouble *mask_a1b1=my_malloc(npix*sizeof(flouble)); flouble *mask_a1b2=my_malloc(npix*sizeof(flouble)); flouble *mask_a2b1=my_malloc(npix*sizeof(flouble)); flouble *mask_a2b2=my_malloc(npix*sizeof(flouble)); cw->lmax=lmax; flouble **cl_masks=my_malloc(2*sizeof(flouble)); cl_masks[0]=my_malloc((cw->lmax+1)*sizeof(flouble)); cl_masks[1]=my_malloc((cw->lmax+1)*sizeof(flouble)); he_map_product(fla1->cs,fla1->mask,flb1->mask,mask_a1b1); he_map_product(fla1->cs,fla1->mask,flb2->mask,mask_a1b2); he_map_product(fla1->cs,fla2->mask,flb1->mask,mask_a2b1); he_map_product(fla1->cs,fla2->mask,flb2->mask,mask_a2b2); he_anafast(&mask_a1b1,&mask_a2b2,0,0,&(cl_masks[0]),fla1->cs,cw->lmax,niter); he_anafast(&mask_a1b2,&mask_a2b1,0,0,&(cl_masks[1]),fla1->cs,cw->lmax,niter); free(mask_a1b1); free(mask_a1b2); free(mask_a2b1); free(mask_a2b2); for(ii=0;ii<=cw->lmax;ii++) { cl_masks[0][ii]*=(ii+0.5)/(2*M_PI); cl_masks[1][ii]*=(ii+0.5)/(2*M_PI); } nmt_master_calculator *c=nmt_compute_master_coefficients(cw->lmax, cw->lmax, 2, cl_masks, 0, 2, 0, 0, 0, 0, 1, l_toeplitz,l_exact,dl_band); cw->xi00_1122=c->xi_00[0]; cw->xi00_1221=c->xi_00[1]; cw->xi02_1122=c->xi_0s[0][0]; cw->xi02_1221=c->xi_0s[1][0]; cw->xi22p_1122=c->xi_pp[0][0]; cw->xi22p_1221=c->xi_pp[1][0]; cw->xi22m_1122=c->xi_mm[0][0]; cw->xi22m_1221=c->xi_mm[1][0]; free(c->xi_00); free(c->xi_0s[0]); free(c->xi_0s[1]); free(c->xi_0s); free(c->xi_pp[0]); free(c->xi_pp[1]); free(c->xi_pp); free(c->xi_mm[0]); free(c->xi_mm[1]); free(c->xi_mm); free(c); free(cl_masks[0]); free(cl_masks[1]); free(cl_masks); return cw; } void nmt_covar_workspace_free(nmt_covar_workspace *cw) { int ii; for(ii=0;ii<=cw->lmax;ii++) { free(cw->xi00_1122[ii]); free(cw->xi00_1221[ii]); free(cw->xi02_1122[ii]); free(cw->xi02_1221[ii]); free(cw->xi22p_1122[ii]); free(cw->xi22p_1221[ii]); free(cw->xi22m_1122[ii]); free(cw->xi22m_1221[ii]); } free(cw); } void nmt_compute_gaussian_covariance_coupled(nmt_covar_workspace *cw, int spin_a,int spin_b,int spin_c,int spin_d, nmt_workspace *wa,nmt_workspace *wb, flouble **clac,flouble **clad, flouble **clbc,flouble **clbd, flouble *covar_out) { if((cw->lmax<wa->bin->ell_max) || (cw->lmax<wb->bin->ell_max)) report_error(NMT_ERROR_COVAR,"Coupling coefficients only computed up to l=%d, but you require" "lmax=%d. Recompute this workspace with a larger lmax\n",cw->lmax,wa->bin->ell_max); int nmaps_a=spin_a ? 2 : 1; int nmaps_b=spin_b ? 2 : 1; int nmaps_c=spin_c ? 2 : 1; int nmaps_d=spin_d ? 
2 : 1; if((wa->ncls!=nmaps_a*nmaps_b) || (wb->ncls!=nmaps_c*nmaps_d)) report_error(NMT_ERROR_COVAR,"Input spins don't match input workspaces\n"); #pragma omp parallel default(none) \ shared(cw,spin_a,spin_b,spin_c,spin_d) \ shared(wa,wb,clac,clad,clbc,clbd) \ shared(nmaps_a,nmaps_b,nmaps_c,nmaps_d,covar_out) { int band_a; #pragma omp for for(band_a=0;band_a<wa->bin->n_bands;band_a++) { int band_b; for(band_b=0;band_b<wb->bin->n_bands;band_b++) { int ia; for(ia=0;ia<nmaps_a;ia++) { int ib; for(ib=0;ib<nmaps_b;ib++) { int ic; int icl_a=ib+nmaps_b*ia; for(ic=0;ic<nmaps_c;ic++) { int id; for(id=0;id<nmaps_d;id++) { int ila; int icl_b=id+nmaps_d*ic; double cbinned=0; for(ila=0;ila<wa->bin->nell_list[band_a];ila++) { int ilb; int la=wa->bin->ell_list[band_a][ila]; for(ilb=0;ilb<wb->bin->nell_list[band_b];ilb++) { int iap; int lb=wb->bin->ell_list[band_b][ilb]; double xis_1122[6]={cw->xi00_1122[la][lb], cw->xi02_1122[la][lb], cw->xi22p_1122[la][lb], cw->xi22m_1122[la][lb], -cw->xi22m_1122[la][lb],0}; double xis_1221[6]={cw->xi00_1221[la][lb], cw->xi02_1221[la][lb], cw->xi22p_1221[la][lb], cw->xi22m_1221[la][lb], -cw->xi22m_1221[la][lb],0}; double prefac_ell=wa->bin->f_ell[band_a][ila]*wb->bin->f_ell[band_b][ilb]; double cov_element=0; for(iap=0;iap<nmaps_a;iap++) { int ibp; for(ibp=0;ibp<nmaps_b;ibp++) { int icp; for(icp=0;icp<nmaps_c;icp++) { int idp; for(idp=0;idp<nmaps_d;idp++) { double *cl_ac=clac[icp+nmaps_c*iap]; double *cl_ad=clad[idp+nmaps_d*iap]; double *cl_bc=clbc[icp+nmaps_c*ibp]; double *cl_bd=clbd[idp+nmaps_d*ibp]; double fac_1122=0.5*(cl_ac[la]*cl_bd[lb]+cl_ac[lb]*cl_bd[la]); double fac_1221=0.5*(cl_ad[la]*cl_bc[lb]+cl_ad[lb]*cl_bc[la]); int ind_1122=cov_get_coupling_pair_index(nmaps_a,nmaps_c,nmaps_b,nmaps_d, ia,iap,ic,icp,ib,ibp,id,idp); int ind_1221=cov_get_coupling_pair_index(nmaps_a,nmaps_d,nmaps_b,nmaps_c, ia,iap,id,idp,ib,ibp,ic,icp); cov_element+=(xis_1122[ind_1122]*fac_1122+xis_1221[ind_1221]*fac_1221)*prefac_ell; } } } } covar_out[((wa->ncls*la+icl_a)*(cw->lmax+1)+lb)*wb->ncls+icl_b]=cov_element; } } } } } } } } //end omp for } //end omp parallel } void nmt_compute_gaussian_covariance(nmt_covar_workspace *cw, int spin_a,int spin_b,int spin_c,int spin_d, nmt_workspace *wa,nmt_workspace *wb, flouble **clac,flouble **clad, flouble **clbc,flouble **clbd, flouble *covar_out) { if((cw->lmax<wa->bin->ell_max) || (cw->lmax<wb->bin->ell_max)) report_error(NMT_ERROR_COVAR,"Coupling coefficients only computed up to l=%d, but you require" "lmax=%d. Recompute this workspace with a larger lmax\n",cw->lmax,wa->bin->ell_max); int nmaps_a=spin_a ? 2 : 1; int nmaps_b=spin_b ? 2 : 1; int nmaps_c=spin_c ? 2 : 1; int nmaps_d=spin_d ? 
2 : 1; if((wa->ncls!=nmaps_a*nmaps_b) || (wb->ncls!=nmaps_c*nmaps_d)) report_error(NMT_ERROR_COVAR,"Input spins don't match input workspaces\n"); gsl_matrix *covar_binned=gsl_matrix_alloc(wa->ncls*wa->bin->n_bands,wb->ncls*wb->bin->n_bands); #pragma omp parallel default(none) \ shared(cw,spin_a,spin_b,spin_c,spin_d) \ shared(wa,wb,clac,clad,clbc,clbd) \ shared(nmaps_a,nmaps_b,nmaps_c,nmaps_d,covar_binned) { int band_a; #pragma omp for for(band_a=0;band_a<wa->bin->n_bands;band_a++) { int band_b; for(band_b=0;band_b<wb->bin->n_bands;band_b++) { int ia; for(ia=0;ia<nmaps_a;ia++) { int ib; for(ib=0;ib<nmaps_b;ib++) { int ic; int icl_a=ib+nmaps_b*ia; for(ic=0;ic<nmaps_c;ic++) { int id; for(id=0;id<nmaps_d;id++) { int ila; int icl_b=id+nmaps_d*ic; double cbinned=0; for(ila=0;ila<wa->bin->nell_list[band_a];ila++) { int ilb; int la=wa->bin->ell_list[band_a][ila]; for(ilb=0;ilb<wb->bin->nell_list[band_b];ilb++) { int iap; int lb=wb->bin->ell_list[band_b][ilb]; double xis_1122[6]={cw->xi00_1122[la][lb],cw->xi02_1122[la][lb],cw->xi22p_1122[la][lb], cw->xi22m_1122[la][lb],-cw->xi22m_1122[la][lb],0}; double xis_1221[6]={cw->xi00_1221[la][lb],cw->xi02_1221[la][lb],cw->xi22p_1221[la][lb], cw->xi22m_1221[la][lb],-cw->xi22m_1221[la][lb],0}; double prefac_ell=wa->bin->w_list[band_a][ila]*wa->bin->f_ell[band_a][ila]* wb->bin->w_list[band_b][ilb]*wb->bin->f_ell[band_b][ilb]; for(iap=0;iap<nmaps_a;iap++) { int ibp; for(ibp=0;ibp<nmaps_b;ibp++) { int icp; for(icp=0;icp<nmaps_c;icp++) { int idp; for(idp=0;idp<nmaps_d;idp++) { double *cl_ac=clac[icp+nmaps_c*iap]; double *cl_ad=clad[idp+nmaps_d*iap]; double *cl_bc=clbc[icp+nmaps_c*ibp]; double *cl_bd=clbd[idp+nmaps_d*ibp]; double fac_1122=0.5*(cl_ac[la]*cl_bd[lb]+cl_ac[lb]*cl_bd[la]); double fac_1221=0.5*(cl_ad[la]*cl_bc[lb]+cl_ad[lb]*cl_bc[la]); int ind_1122=cov_get_coupling_pair_index(nmaps_a,nmaps_c,nmaps_b,nmaps_d, ia,iap,ic,icp,ib,ibp,id,idp); int ind_1221=cov_get_coupling_pair_index(nmaps_a,nmaps_d,nmaps_b,nmaps_c, ia,iap,id,idp,ib,ibp,ic,icp); cbinned+=(xis_1122[ind_1122]*fac_1122+xis_1221[ind_1221]*fac_1221)*prefac_ell; } } } } } } gsl_matrix_set(covar_binned,wa->ncls*band_a+icl_a,wb->ncls*band_b+icl_b,cbinned); } } } } } } //end omp for } //end omp parallel //Sandwich with inverse MCM gsl_matrix *covar_out_g =gsl_matrix_alloc(wa->ncls*wa->bin->n_bands,wb->ncls*wb->bin->n_bands); gsl_matrix *mat_tmp =gsl_matrix_alloc(wa->ncls*wa->bin->n_bands,wb->ncls*wb->bin->n_bands); gsl_matrix *inverse_a =gsl_matrix_alloc(wa->ncls*wa->bin->n_bands,wa->ncls*wa->bin->n_bands); gsl_matrix *inverse_b =gsl_matrix_alloc(wb->ncls*wb->bin->n_bands,wb->ncls*wb->bin->n_bands); gsl_linalg_LU_invert(wb->coupling_matrix_binned,wb->coupling_matrix_perm,inverse_b); //M_b^-1 gsl_linalg_LU_invert(wa->coupling_matrix_binned,wa->coupling_matrix_perm,inverse_a); //M_a^-1 gsl_blas_dgemm(CblasNoTrans,CblasTrans ,1,covar_binned,inverse_b,0,mat_tmp ); //tmp = C * M_b^-1^T gsl_blas_dgemm(CblasNoTrans,CblasNoTrans,1,inverse_a ,mat_tmp ,0,covar_out_g); //C' = M_a^-1 * C * M_b^-1^T int ii; long elem=0; for(ii=0;ii<wa->ncls*wa->bin->n_bands;ii++) { int jj; for(jj=0;jj<wb->ncls*wb->bin->n_bands;jj++) { covar_out[elem]=gsl_matrix_get(covar_out_g,ii,jj); elem++; } } gsl_matrix_free(covar_binned); gsl_matrix_free(mat_tmp); gsl_matrix_free(inverse_a); gsl_matrix_free(inverse_b); gsl_matrix_free(covar_out_g); }
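The innermost accumulation in both covariance routines above reduces, for each multipole pair (la, lb), to two mask couplings weighted by symmetrised products of the input cross-spectra. A minimal sketch of that per-(la,lb) building block, using the same variable names as above:

/* Sketch of the per-(la,lb) term accumulated above: the 1122 and 1221 mask
 * couplings are weighted by symmetrised products of the cross-spectra.
 * cl_ac, cl_ad, cl_bc, cl_bd are the same arrays indexed in the loops above. */
static double gauss_cov_term(double xi_1122, double xi_1221,
                             const double *cl_ac, const double *cl_ad,
                             const double *cl_bc, const double *cl_bd,
                             int la, int lb)
{
    double fac_1122 = 0.5 * (cl_ac[la] * cl_bd[lb] + cl_ac[lb] * cl_bd[la]);
    double fac_1221 = 0.5 * (cl_ad[la] * cl_bc[lb] + cl_ad[lb] * cl_bc[la]);
    return xi_1122 * fac_1122 + xi_1221 * fac_1221;
}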
allktruss.c
//------------------------------------------------------------------------------ // allktruss: construct all k-trusses of a graph (OpenMP, not with GraphBLAS) //------------------------------------------------------------------------------ // C = allktruss (A), all k-trusses of the graph A // On input, A is the adjacency matrix of a graph, which must be square with // symmetric pattern, and no diagonal entries. These conditions are not // checked. A is treated as if binary on input so the content of Ax is ignored // on input. The matrix A is represented in compressed sparse column form as // Ap, Ai, and n on input. That is, the pattern of column A(:,j) is held in // Ai [Ap [j] ... Ap [j+1]-1], where Ap [0] = 0 and Ap [n] = nnz (A). // The value of k for the requested k-truss is provided as the scalar input, // support = k-2, which must be > 0. // On output, the input graph A is destroyed. The k-trusses of A are returned // as a list of matrices (Cps, Cis, and Cxs). The k-truss of A is held in: // Cp = Cps [k]: array of size n+1, column pointers. // Ci = Cis [k]: row indices, of size nz = Cp [n]. // Cx = Cxs [k]: values, of size nz. // In this matrix, C(i,j) is the # of ntriangles in A that are in the k-truss // of A. The edges of the k-truss are a subset of the input graph A. Each // edge in C is part of at least k-2 triangles in C. The pattern of C, (that // is, spones(C) in MATLAB notation), is the adjacency matrix of the k-truss // subgraph of A. The edge weights of C are the support of each edge. That // is, C(i,j)=nt if the edge (i,j) is part of nt triangles in C. All edges in // C have support of at least k-2. The total number of triangles in C is // sum(sum(C))/6 in MATLAB notation. The number of edges in C is nnz(C)/2, in // MATLAB notation, or Cp [n]/2. C is returned as symmetric with a zero-free // diagonal, with all entries greater than or equal to k-2. The matrix C is // returned on output in compressed sparse column form in Cp, Ci, Cx, in the // entries Cps [k], Cis [k], and Cxs [k], respectively. That is, the pattern // and values of C(:,j) are held in Ci [Cp [j] ... Cp [j+1]-1] and Cx [Cp [j] // ... Cp [j+1]-1], where Cp [0] = 0 and Cp [n] = nnz (C). Note that returning // the k-trusses is optional; they are only returned if Cps, Cis, and Cxs are // non-NULL on input. The last k-truss, when k=kmax, is empty and this matrix // is not returned (Cps [kmax], Cis [kmax], and Cxs [kmax] are NULL). #include "ktruss_def.h" //------------------------------------------------------------------------------ // cmult: C<A>=A*A //------------------------------------------------------------------------------ // workspace for each thread: _Thread_local Index *restrict w = NULL ; _Thread_local bool *restrict Mark = NULL ; void cmult // C = (A*A) .* A, overwriting A with C ( // input/output: int64_t *restrict Ap, // column pointers, size n+1 Index *restrict Ai, // row indices, size anz = Ap [n] // output, content not defined on input: Index *restrict Ax, // values // input, not modified: const Index n, // A is n-by-n const int nthreads, // # of threads const Index chunk // scheduler chunk size ) { //---------------------------------------------------------------------- // C = (A*A) .* A //---------------------------------------------------------------------- // This step computes the values of C in Ax. The pattern of A and C // are the same, and are in Ap, Ai, and n. A is treated as if binary // so its values are ignored. 
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,chunk) for (Index j = 0 ; j < n ; j++) { // scatter A(:,j) into Mark. All of w is zero. for (int64_t p = Ap [j] ; p < Ap [j+1] ; p++) { Mark [Ai [p]] = 1 ; } // C(:,j) = (A * A(:,j)) .* Mark for (int64_t p = Ap [j] ; p < Ap [j+1] ; p++) { const Index k = Ai [p] ; // (row k, not k-truss) // C(:,j) += (A(:,k) * A(k,j)) .* Mark for (int64_t pa = Ap [k] ; pa < Ap [k+1] ; pa++) { // C(i,j) += (A(i,k) * A(k,j)) .* Mark Index i = Ai [pa] ; if (Mark [i]) w [i]++ ; } } // gather C(:,j) from the workspace and clear the Mark for (int64_t p = Ap [j] ; p < Ap [j+1] ; p++) { Index i = Ai [p] ; Ax [p] = w [i] ; Mark [i] = 0 ; w [i] = 0 ; } } } //------------------------------------------------------------------------------ // allktruss: construct all k-trusses of a graph //------------------------------------------------------------------------------ bool allktruss // true if successful, false otherwise ( // input/output: int64_t *restrict Ap, // column pointers, size n+1 Index *restrict Ai, // row indices, size anz = Ap [n] // output, content not defined on input: Index *restrict Ax, // values // input, not modified: const Index n, // A is n-by-n const int threads, // # of threads const Index chunk, // scheduler chunk size // output statistics int64_t *restrict kmax, // smallest k where k-truss is empty int64_t *restrict ntris, // size n, ntris [k] is #triangles in k-truss int64_t *restrict nedges, // size n, nedges [k] is #edges in k-truss int64_t *restrict nstepss, // size n, nsteps [k] is #steps for k-truss // optional output k-trusses, if present int64_t **restrict Cps, // size n Index **restrict Cis, // size n Index **restrict Cxs // size n ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- if (nstepss == NULL || kmax == NULL || ntris == NULL || nedges == NULL) { return (false) ; } bool keep_all_ktrusses = (Cps != NULL && Cis != NULL && Cxs != NULL) ; int nthreads = (n < chunk) ? 
1 : threads ; for (Index k = 0 ; k < 3 ; k++) { if (keep_all_ktrusses) { Cps [k] = NULL ; Cis [k] = NULL ; Cxs [k] = NULL ; } ntris [k] = 0 ; nedges [k] = 0 ; nstepss [k] = 0 ; } (*kmax) = 0 ; //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- bool ok = true ; #pragma omp parallel num_threads(nthreads) reduction(&&:ok) { w = (Index *) calloc (n, sizeof (Index)) ; Mark = (bool *) calloc (n, sizeof (bool )) ; ok = (Mark != NULL && w != NULL) ; } #pragma omp parallel num_threads(nthreads) { if (!ok) { // out of memory if (w != NULL) free (w ) ; if (Mark != NULL) free (Mark) ; } } if (!ok) return (false) ; //-------------------------------------------------------------------------- // C = allktruss (A) //-------------------------------------------------------------------------- double tmult = 0 ; double tsel = 0 ; double t1 = omp_get_wtime ( ) ; // C = (A*A) .* A, overwriting A with C int64_t last_cnz = Ap [n] ; cmult (Ap, Ai, Ax, n, nthreads, chunk) ; int64_t nsteps = 1 ; double t2 = omp_get_wtime ( ) ; printf ("cmult time: %g\n", t2-t1) ; tmult += (t2-t1) ; //-------------------------------------------------------------------------- for (Index k = 3 ; ; k++) { Index support = k-2 ; while (1) { //------------------------------------------------------------------ // C = C .* (C >= support) //------------------------------------------------------------------ // C is now in Ap, Ai, Ax, and n. // Prune all entries C(i,j) < support. double t1 = omp_get_wtime ( ) ; int64_t cnz = 0 ; for (Index j = 0 ; j < n ; j++) { // log the start of column C(:,j) int64_t p1 = Ap [j] ; Ap [j] = cnz ; for (int64_t p = p1 ; p < Ap [j+1] ; p++) { // consider the edge C(i,j) Index i = Ai [p] ; Index cij = Ax [p] ; if (cij >= support) { // the edge C(i,j) has enough support; keep it Ai [cnz ] = i ; Ax [cnz++] = cij ; } } } Ap [n] = cnz ; t2 = omp_get_wtime ( ) ; printf ("select time: %g\n", t2-t1) ; tsel += (t2-t1) ; //------------------------------------------------------------------ // check if k-truss has been found //------------------------------------------------------------------ if (cnz == last_cnz) { // k-truss has been found ntris [k] = ktruss_ntriangles (cnz, Ax) ; nedges [k] = cnz / 2 ; nstepss [k] = nsteps ; nsteps = 0 ; if (cnz == 0) { // this is the last k-truss (an empty matrix) if (keep_all_ktrusses) { Cps [k] = NULL ; Cis [k] = NULL ; Cxs [k] = NULL ; } (*kmax) = k ; #pragma omp parallel num_threads(nthreads) { free (w) ; free (Mark) ; } printf ("allktruss nthreads %d done: tmult %g tsel %g\n", nthreads, tmult, tsel) ; return (true) ; } else if (keep_all_ktrusses) { // save the k-truss in the list of output k-trusses int64_t *Cp = (int64_t *) malloc ((n+1) * sizeof (int64_t)); Index *Ci = (Index *) malloc (cnz * sizeof (Index)) ; Index *Cx = (Index *) malloc (cnz * sizeof (Index)) ; Cps [k] = Cp ; Cis [k] = Ci ; Cxs [k] = Cx ; if (Cp == NULL || Ci == NULL || Cx == NULL) { // out of memory: free all outputs for (Index j = 3 ; j <= k ; j++) { if (Cps [j] != NULL) free (Cps [j]) ; if (Cis [j] != NULL) free (Cis [j]) ; if (Cxs [j] != NULL) free (Cxs [j]) ; Cps [j] = NULL ; Cis [j] = NULL ; Cxs [j] = NULL ; } #pragma omp parallel num_threads(nthreads) { free (w) ; free (Mark) ; } return (false) ; } memcpy (Cp, Ap, (n+1) * sizeof (int64_t)) ; memcpy (Ci, Ai, cnz * sizeof (Index)) ; memcpy (Cx, Ax, cnz * sizeof (Index)) ; } // start finding the next k-truss break ; } 
//------------------------------------------------------------------ // continue searching for this k-truss //------------------------------------------------------------------ nsteps++ ; //------------------------------------------------------------------ // count the triangles for the next iteration //------------------------------------------------------------------ t1 = omp_get_wtime ( ) ; // C = (A*A) .* A, overwriting A with C cmult (Ap, Ai, Ax, n, nthreads, chunk) ; last_cnz = Ap [n] ; t2 = omp_get_wtime ( ) ; printf ("mult time: %g\n", t2-t1) ; tmult += (t2-t1) ; } } return (false) ; }
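The support pruned above is, per edge (i,j), the number of triangles containing that edge, i.e. the size of the intersection of the adjacency lists of i and j; cmult() computes it for all edges at once as C = (A*A) .* A. A sketch of the same quantity for a single edge, assuming the row indices in Ai are sorted (Index is the type from ktruss_def.h; edge_support is a hypothetical helper, not part of this file):

/* Sketch only: support of a single edge (i,j) by merging the sorted
 * adjacency lists A(:,i) and A(:,j).  Each common neighbour closes one
 * triangle containing the edge.  Assumes sorted row indices. */
static int64_t edge_support(const int64_t *Ap, const Index *Ai,
                            Index i, Index j)
{
    int64_t pi = Ap[i], pj = Ap[j], support = 0;
    while (pi < Ap[i+1] && pj < Ap[j+1])
    {
        if (Ai[pi] < Ai[pj]) pi++;
        else if (Ai[pi] > Ai[pj]) pj++;
        else { support++; pi++; pj++; }   /* common neighbour found */
    }
    return support;
}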
multi_index_hashing.h
/* multi_index_hashing.h * * Author: Fabian Meyer * Created On: 29 Oct 2019 * License: MIT * * Implementation is based on the work "Fast Search in Hamming Space with * Multi-Index Hashing" by Mohammad Norouzi, Ali Punjani and David J. Fleet. */ #ifndef KNN_MULTI_INDEX_HASHING_H_ #define KNN_MULTI_INDEX_HASHING_H_ #include <map> #include <set> #include "knn/distance_functors.h" #include "knn/query_heap.h" namespace knn { /** Class for performing KNN search in hamming space by multi-index hashing. */ template<typename Scalar> class MultiIndexHashing { public: static_assert(std::is_integral<Scalar>::value, "MultiIndexHashing Scalar has to be integral"); typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix; typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector; typedef knn::Matrixi Matrixi; private: HammingDistance<Scalar> distance_; Matrix dataCopy_; const Matrix *data_; bool sorted_; Scalar maxDist_; Index substrLen_; Index threads_; std::vector<std::map<Scalar, std::vector<Index>>> buckets_; template<typename Derived> Scalar extractCode(const Eigen::MatrixBase<Derived> &data, const Index idx, const Index offset) const { Index leftShift = std::max<Index>(0, static_cast<Index>(sizeof(Scalar)) - offset - substrLen_); Index rightShift = leftShift + offset; Scalar code = (data(idx, 0) << (leftShift * 8)) >> (rightShift * 8); if(static_cast<Index>(sizeof(Scalar)) - offset < substrLen_ && idx + 1 < data.rows()) { Index shift = 2 * static_cast<Index>(sizeof(Scalar)) - substrLen_ - offset; code |= data(idx+1, 0) << (shift * 8); } return code; } public: MultiIndexHashing() : distance_(), dataCopy_(), data_(nullptr), sorted_(true), maxDist_(0), substrLen_(1), threads_(1) { } /** Constructs an index with the given data. * This does not build the the index. * @param data NxM matrix, M points of dimension N * @param copy if true copies the data, otherwise assumes static data */ MultiIndexHashing(const Matrix &data, const bool copy=false) : MultiIndexHashing() { setData(data, copy); } /** Set the maximum distance for querying the index. * Note that if no maximum distance is used, this algorithm performs * basically a brute force search. * @param maxDist maximum distance, <= 0 for no limit */ void setMaxDistance(const Scalar maxDist) { maxDist_ = maxDist; } /** Set if the points returned by the queries should be sorted * according to their distance to the query points. * @param sorted sort query results */ void setSorted(const bool sorted) { sorted_ = sorted; } /** Set the amount of threads that should be used for building and * querying the tree. * OpenMP has to be enabled for this to work. * @param threads amount of threads, 0 for optimal choice */ void setThreads(const unsigned int threads) { threads_ = threads; } /** Set the length of substrings (in bytes) used for multi index hashing. * @param len lentth of bucket substrings in bytes*/ void setSubstringLength(const Index len) { substrLen_ = len; } /** Set the data points used for the KNN search. 
* @param data NxM matrix, M points of dimension N * @param copy if true data is copied, assumes static data otherwise */ void setData(const Matrix &data, const bool copy = false) { clear(); if(copy) { dataCopy_ = data; data_ = &dataCopy_; } else { data_ = &data; } } void build() { if(data_ == nullptr) throw std::runtime_error("cannot build MultiIndexHashing; data not set"); if(data_->size() == 0) throw std::runtime_error("cannot build MultiIndexHashing; data is empty"); const Matrix &data = *data_; const Index bytesPerVec = data.rows() * static_cast<Index>(sizeof(Scalar)); if(bytesPerVec % substrLen_ != 0) throw std::runtime_error("cannot build MultiIndexHashing; cannot divide byte count per vector by substring length without remainings"); buckets_.clear(); buckets_.resize(bytesPerVec / substrLen_); for(size_t i = 0; i < buckets_.size(); ++i) { Index start = static_cast<Index>(i) * substrLen_; Index idx = start / static_cast<Index>(sizeof(Scalar)); Index offset = start % static_cast<Index>(sizeof(Scalar)); std::map<Scalar, std::vector<Index>> &map = buckets_[i]; for(Index c = 0; c < data.cols(); ++c) { Scalar code = extractCode(data.col(c), idx, offset); if(map.find(code) == map.end()) map[code] = std::vector<Index>(); map[code].push_back(c); } } } template<typename Derived> void query(const Eigen::MatrixBase<Derived> &queryPoints, const size_t knn, Matrixi &indices, Matrix &distances) const { if(buckets_.size() == 0) throw std::runtime_error("cannot query MultiIndexHashing; not built yet"); if(queryPoints.rows() != dimension()) throw std::runtime_error("cannot query MultiIndexHashing; data and query points do not have same dimension"); const Matrix &data = *data_; indices.setConstant(knn, queryPoints.cols(), -1); distances.setConstant(knn, queryPoints.cols(), -1); Index *indicesRaw = indices.data(); Scalar *distsRaw = distances.data(); Scalar maxDistPart = maxDist_ / buckets_.size(); #pragma omp parallel for num_threads(threads_) for(Index c = 0; c < queryPoints.cols(); ++c) { std::set<Index> candidates; for(size_t i = 0; i < buckets_.size(); ++i) { Index start = static_cast<Index>(i) * substrLen_; Index idx = start / static_cast<Index>(sizeof(Scalar)); Index offset = start % static_cast<Index>(sizeof(Scalar)); const std::map<Scalar, std::vector<Index>> &map = buckets_[i]; Scalar code = extractCode(queryPoints.col(c), idx, offset); for(const auto &x: map) { Scalar dist = distance_(x.first, code); if(maxDistPart <= 0 || dist <= maxDistPart) { for(size_t j = 0; j < x.second.size(); ++j) candidates.insert(x.second[j]); } } } Scalar *distPoint = &distsRaw[c * knn]; Index *idxPoint = &indicesRaw[c * knn]; // create heap to find nearest neighbours QueryHeap<Scalar> dataHeap(idxPoint, distPoint, knn); for(Index idx: candidates) { Scalar dist = distance_(data.col(idx), queryPoints.col(c)); bool isInRange = maxDist_ <= 0 || dist <= maxDist_; bool isImprovement = !dataHeap.full() || dist < dataHeap.front(); if(isInRange && isImprovement) { if(dataHeap.full()) dataHeap.pop(); dataHeap.push(idx, dist); } } if(sorted_) dataHeap.sort(); } } /** Returns the amount of data points stored in the search index. * @return number of data points */ Index size() const { return data_ == nullptr ? 0 : data_->cols(); } /** Returns the dimension of the data points in the search index. * @return dimension of data points */ Index dimension() const { return data_ == nullptr ? 0 : data_->rows(); } void clear() { data_ = nullptr; dataCopy_.resize(0, 0); buckets_.clear(); } }; } #endif
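The query above splits the search radius evenly over the substring buckets (maxDistPart = maxDist_ / buckets_.size()): by the pigeonhole argument behind multi-index hashing, two codes within maxDist_ bits of each other must differ by at most floor(maxDist_/m) bits in at least one of the m substrings, which is why each bucket scan can be restricted to dist <= maxDistPart. A simplified C sketch of the substring extraction and per-substring distance, assuming one-byte substrings of a single 64-bit code (a special case of extractCode() above) and the GCC/Clang popcount intrinsic:

/* Sketch only: one-byte substrings of a 64-bit code, roughly the
 * substrLen_ == 1 configuration of the class above. */
#include <stdint.h>

static uint8_t substring_code(uint64_t code, int s)      /* s in 0..7 */
{
    return (uint8_t)(code >> (8 * s));
}

static unsigned substring_dist(uint64_t a, uint64_t b, int s)
{
    return (unsigned)__builtin_popcountll(
        (unsigned long long)(substring_code(a, s) ^ substring_code(b, s)));
}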
ast-dump-openmp-begin-declare-variant_7.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s // expected-no-diagnostics int OK_1(void); #pragma omp begin declare variant match(implementation={vendor(intel)}) int OK_1(void) { return 1; } int OK_2(void) { return 1; } int not_OK(void) { return 1; } int OK_3(void) { return 1; } #pragma omp end declare variant int OK_3(void); int test(void) { // Should cause an error due to not_OK() return OK_1() + not_OK() + OK_3(); } // Make sure: // - we see a single error for `not_OK` // - we do not see errors for OK_{1,2,3} // FIXME: We actually do not see there error here. // This case is unlikely to happen in practise and hard to diagnose during SEMA. // We will issue an error during code generation instead. This is similar to the // diagnosis in other multi-versioning schemes. // CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, col:14> col:5 used OK_1 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_1:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(intel)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_2:0x[a-z0-9]*]] <line:8:1> 'int ({{.*}})' Function [[ADDR_3:0x[a-z0-9]*]] 'OK_1[implementation={vendor(intel)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_3]] <col:1, line:10:1> line:8:1 OK_1[implementation={vendor(intel)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_4:0x[a-z0-9]*]] <col:16, line:10:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_5:0x[a-z0-9]*]] <line:9:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_6:0x[a-z0-9]*]] <col:10> 'int' 1 // CHECK-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:11:1, col:14> col:5 implicit OK_2 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(intel)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'OK_2[implementation={vendor(intel)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:13:1> line:11:1 OK_2[implementation={vendor(intel)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:16, line:13:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:12:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 1 // CHECK-NEXT: |-FunctionDecl [[ADDR_14:0x[a-z0-9]*]] <line:14:1, col:16> col:5 implicit used not_OK 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_15:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(intel)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_16:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_17:0x[a-z0-9]*]] 'not_OK[implementation={vendor(intel)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_17]] <col:1, line:16:1> line:14:1 not_OK[implementation={vendor(intel)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:18, line:16:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:15:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 1 // CHECK-NEXT: |-FunctionDecl [[ADDR_21:0x[a-z0-9]*]] <line:17:1, col:14> col:5 implicit used OK_3 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_22:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(intel)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_23:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_24:0x[a-z0-9]*]] 'OK_3[implementation={vendor(intel)}]' 'int ({{.*}})' // 
CHECK-NEXT: |-FunctionDecl [[ADDR_24]] <col:1, line:19:1> line:17:1 OK_3[implementation={vendor(intel)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_25:0x[a-z0-9]*]] <col:16, line:19:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_26:0x[a-z0-9]*]] <line:18:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_27:0x[a-z0-9]*]] <col:10> 'int' 1 // CHECK-NEXT: |-FunctionDecl [[ADDR_28:0x[a-z0-9]*]] prev [[ADDR_21]] <line:22:1, col:14> col:5 used OK_3 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_29:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(intel)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_23]] <line:17:1> 'int ({{.*}})' Function [[ADDR_24]] 'OK_3[implementation={vendor(intel)}]' 'int ({{.*}})' // CHECK-NEXT: `-FunctionDecl [[ADDR_30:0x[a-z0-9]*]] <line:24:1, line:27:1> line:24:5 test 'int ({{.*}})' // CHECK-NEXT: `-CompoundStmt [[ADDR_31:0x[a-z0-9]*]] <col:16, line:27:1> // CHECK-NEXT: `-ReturnStmt [[ADDR_32:0x[a-z0-9]*]] <line:26:3, col:35> // CHECK-NEXT: `-BinaryOperator [[ADDR_33:0x[a-z0-9]*]] <col:10, col:35> 'int' '+' // CHECK-NEXT: |-BinaryOperator [[ADDR_34:0x[a-z0-9]*]] <col:10, col:26> 'int' '+' // CHECK-NEXT: | |-CallExpr [[ADDR_35:0x[a-z0-9]*]] <col:10, col:15> 'int' // CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_36:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | | `-DeclRefExpr [[ADDR_37:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'OK_1' 'int ({{.*}})' // CHECK-NEXT: | `-CallExpr [[ADDR_38:0x[a-z0-9]*]] <col:19, col:26> 'int' // CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_39:0x[a-z0-9]*]] <col:19> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | `-DeclRefExpr [[ADDR_40:0x[a-z0-9]*]] <col:19> 'int ({{.*}})' {{.*}}Function [[ADDR_14]] 'not_OK' 'int ({{.*}})' // CHECK-NEXT: `-CallExpr [[ADDR_41:0x[a-z0-9]*]] <col:30, col:35> 'int' // CHECK-NEXT: `-ImplicitCastExpr [[ADDR_42:0x[a-z0-9]*]] <col:30> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: `-DeclRefExpr [[ADDR_43:0x[a-z0-9]*]] <col:30> 'int ({{.*}})' {{.*}}Function [[ADDR_28]] 'OK_3' 'int ({{.*}})'
GB_AxB_saxpy3_template.c
//------------------------------------------------------------------------------ // GB_AxB_saxpy3_template: C=A*B, C<M>=A*B, or C<!M>=A*B via saxpy3 method //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // GB_AxB_saxpy3_template.c computes C=A*B for any semiring and matrix types, // where C is sparse or hypersparse. #include "GB_unused.h" //------------------------------------------------------------------------------ // template code for C=A*B via the saxpy3 method //------------------------------------------------------------------------------ { // double ttt = omp_get_wtime ( ) ; //-------------------------------------------------------------------------- // get the chunk size //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; //-------------------------------------------------------------------------- // get M, A, B, and C //-------------------------------------------------------------------------- int64_t *restrict Cp = C->p ; // const int64_t *restrict Ch = C->h ; const int64_t cvlen = C->vlen ; const int64_t cnvec = C->nvec ; const int64_t *restrict Bp = B->p ; const int64_t *restrict Bh = B->h ; const int8_t *restrict Bb = B->b ; const int64_t *restrict Bi = B->i ; const bool B_iso = B->iso ; const int64_t bvlen = B->vlen ; const bool B_jumbled = B->jumbled ; const bool B_is_sparse = GB_IS_SPARSE (B) ; const bool B_is_hyper = GB_IS_HYPERSPARSE (B) ; const bool B_is_bitmap = GB_IS_BITMAP (B) ; const bool B_is_sparse_or_hyper = B_is_sparse || B_is_hyper ; const int64_t *restrict Ap = A->p ; const int64_t *restrict Ah = A->h ; const int8_t *restrict Ab = A->b ; const int64_t *restrict Ai = A->i ; const int64_t anvec = A->nvec ; const int64_t avlen = A->vlen ; const bool A_is_sparse = GB_IS_SPARSE (A) ; const bool A_is_hyper = GB_IS_HYPERSPARSE (A) ; const bool A_is_bitmap = GB_IS_BITMAP (A) ; const bool A_iso = A->iso ; const bool A_jumbled = A->jumbled ; const bool A_ok_for_binary_search = ((A_is_sparse || A_is_hyper) && !A_jumbled) ; #if ( !GB_NO_MASK ) const int64_t *restrict Mp = M->p ; const int64_t *restrict Mh = M->h ; const int8_t *restrict Mb = M->b ; const int64_t *restrict Mi = M->i ; const GB_void *restrict Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ; const bool M_is_hyper = GB_IS_HYPERSPARSE (M) ; const bool M_is_bitmap = GB_IS_BITMAP (M) ; const bool M_jumbled = GB_JUMBLED (M) ; size_t msize = M->type->size ; int64_t mnvec = M->nvec ; int64_t mvlen = M->vlen ; #endif #if ( !GB_IS_ANY_PAIR_SEMIRING ) const GB_ATYPE *restrict Ax = (GB_ATYPE *) (A_is_pattern ? NULL : A->x) ; const GB_BTYPE *restrict Bx = (GB_BTYPE *) (B_is_pattern ? NULL : B->x) ; #endif //========================================================================== // phase2: numeric work for fine tasks //========================================================================== // Coarse tasks: nothing to do in phase2. // Fine tasks: compute nnz (C(:,j)), and values in Hx via atomics. 
int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (taskid = 0 ; taskid < nfine ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- int64_t kk = SaxpyTasks [taskid].vector ; int team_size = SaxpyTasks [taskid].team_size ; int64_t hash_size = SaxpyTasks [taskid].hsize ; bool use_Gustavson = (hash_size == cvlen) ; int64_t pB = SaxpyTasks [taskid].start ; int64_t pB_end = SaxpyTasks [taskid].end + 1 ; int64_t pleft = 0, pright = anvec-1 ; int64_t j = GBH (Bh, kk) ; GB_GET_T_FOR_SECONDJ ; #if !GB_IS_ANY_PAIR_SEMIRING GB_CTYPE *restrict Hx = (GB_CTYPE *) SaxpyTasks [taskid].Hx ; #endif #if GB_IS_PLUS_FC32_MONOID float *restrict Hx_real = (float *) Hx ; float *restrict Hx_imag = Hx_real + 1 ; #elif GB_IS_PLUS_FC64_MONOID double *restrict Hx_real = (double *) Hx ; double *restrict Hx_imag = Hx_real + 1 ; #endif if (use_Gustavson) { //------------------------------------------------------------------ // phase2: fine Gustavson task //------------------------------------------------------------------ // Hf [i] == 0: unlocked, i has not been seen in C(:,j). // Hx [i] is not initialized. // M(i,j) is 0, or M is not present. // if M: Hf [i] stays equal to 0 (or 3 if locked) // if !M, or no M: C(i,j) is a new entry seen for 1st time // Hf [i] == 1: unlocked, i has not been seen in C(:,j). // Hx [i] is not initialized. M is present. // M(i,j) is 1. (either M or !M case) // if M: C(i,j) is a new entry seen for the first time. // if !M: Hf [i] stays equal to 1 (or 3 if locked) // Hf [i] == 2: unlocked, i has been seen in C(:,j). // Hx [i] is initialized. This case is independent of M. // Hf [i] == 3: locked. Hx [i] cannot be accessed. int8_t *restrict Hf = (int8_t *restrict) SaxpyTasks [taskid].Hf ; #if ( GB_NO_MASK ) { // phase2: fine Gustavson task, C(:,j)=A*B(:,j) #include "GB_AxB_saxpy3_fineGus_phase2.c" } #elif ( !GB_MASK_COMP ) { // phase2: fine Gustavson task, C(:,j)<M(:,j)>=A*B(:,j) #include "GB_AxB_saxpy3_fineGus_M_phase2.c" } #else { // phase2: fine Gustavson task, C(:,j)<!M(:,j)>=A*B(:,j) #include "GB_AxB_saxpy3_fineGus_notM_phase2.c" } #endif } else { //------------------------------------------------------------------ // phase2: fine hash task //------------------------------------------------------------------ // Each hash entry Hf [hash] splits into two parts, (h,f). f // is in the 2 least significant bits. h is 62 bits, and is // the 1-based index i of the C(i,j) entry stored at that // location in the hash table. // If M is present (M or !M), and M(i,j)=1, then (i+1,1) // has been inserted into the hash table, in phase0. // Given Hf [hash] split into (h,f) // h == 0, f == 0: unlocked and unoccupied. // note that if f=0, h must be zero too. // h == i+1, f == 1: unlocked, occupied by M(i,j)=1. // C(i,j) has not been seen, or is ignored. // Hx is not initialized. M is present. // if !M: this entry will be ignored in C. // h == i+1, f == 2: unlocked, occupied by C(i,j). // Hx is initialized. M is no longer // relevant. // h == (anything), f == 3: locked. 
int64_t *restrict Hf = (int64_t *restrict) SaxpyTasks [taskid].Hf ; int64_t hash_bits = (hash_size-1) ; #if ( GB_NO_MASK ) { //-------------------------------------------------------------- // phase2: fine hash task, C(:,j)=A*B(:,j) //-------------------------------------------------------------- // no mask present, or mask ignored #undef GB_CHECK_MASK_ij #include "GB_AxB_saxpy3_fineHash_phase2.c" } #elif ( !GB_MASK_COMP ) { //-------------------------------------------------------------- // phase2: fine hash task, C(:,j)<M(:,j)>=A*B(:,j) //-------------------------------------------------------------- GB_GET_M_j ; // get M(:,j) if (M_in_place) { // M is bitmap/as-if-full, thus not scattered into Hf if (M_is_bitmap && Mask_struct) { // M is bitmap and structural const int8_t *restrict Mjb = Mb + pM_start ; #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ if (!Mjb [i]) continue ; #include "GB_AxB_saxpy3_fineHash_phase2.c" } else { // M is bitmap/dense #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ const int64_t pM = pM_start + i ; \ GB_GET_M_ij (pM) ; \ if (!mij) continue ; #include "GB_AxB_saxpy3_fineHash_phase2.c" } } else { // M(:,j) is sparse and scattered into Hf #include "GB_AxB_saxpy3_fineHash_M_phase2.c" } } #else { //-------------------------------------------------------------- // phase2: fine hash task, C(:,j)<!M(:,j)>=A*B(:,j) //-------------------------------------------------------------- GB_GET_M_j ; // get M(:,j) if (M_in_place) { // M is bitmap/as-if-full, thus not scattered into Hf if (M_is_bitmap && Mask_struct) { // M is bitmap and structural const int8_t *restrict Mjb = Mb + pM_start ; #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ if (Mjb [i]) continue ; #include "GB_AxB_saxpy3_fineHash_phase2.c" } else { // M is bitmap/dense #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ const int64_t pM = pM_start + i ; \ GB_GET_M_ij (pM) ; \ if (mij) continue ; #include "GB_AxB_saxpy3_fineHash_phase2.c" } } else { // M(:,j) is sparse/hyper and scattered into Hf #include "GB_AxB_saxpy3_fineHash_notM_phase2.c" } } #endif } } // ttt = omp_get_wtime ( ) - ttt ; // GB_Global_timing_add (9, ttt) ; // ttt = omp_get_wtime ( ) ; //========================================================================== // phase3/phase4: count nnz(C(:,j)) for fine tasks, cumsum of Cp //========================================================================== GB_AxB_saxpy3_cumsum (C, SaxpyTasks, nfine, chunk, nthreads, Context) ; // ttt = omp_get_wtime ( ) - ttt ; // GB_Global_timing_add (10, ttt) ; // ttt = omp_get_wtime ( ) ; //========================================================================== // phase5: numeric phase for coarse tasks, gather for fine tasks //========================================================================== // C is iso for the ANY_PAIR semiring, and non-iso otherwise // allocate Ci and Cx int64_t cnz = Cp [cnvec] ; // set C->iso = GB_IS_ANY_PAIR_SEMIRING OK GrB_Info info = GB_bix_alloc (C, cnz, GxB_SPARSE, false, true, GB_IS_ANY_PAIR_SEMIRING, Context) ; if (info != GrB_SUCCESS) { // out of memory return (GrB_OUT_OF_MEMORY) ; } int64_t *restrict Ci = C->i ; #if ( !GB_IS_ANY_PAIR_SEMIRING ) GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ; #endif ASSERT (C->i_size == GB_Global_memtable_size (C->i)) ; // ttt = omp_get_wtime ( ) - ttt ; // GB_Global_timing_add (11, ttt) ; // ttt = omp_get_wtime ( ) ; bool C_jumbled = false ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(||:C_jumbled) for (taskid = 0 ; taskid < ntasks ; taskid++) { 
//---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- #if !GB_IS_ANY_PAIR_SEMIRING GB_CTYPE *restrict Hx = (GB_CTYPE *) SaxpyTasks [taskid].Hx ; #endif int64_t hash_size = SaxpyTasks [taskid].hsize ; bool use_Gustavson = (hash_size == cvlen) ; bool task_C_jumbled = false ; if (taskid < nfine) { //------------------------------------------------------------------ // fine task: gather pattern and values //------------------------------------------------------------------ int64_t kk = SaxpyTasks [taskid].vector ; int team_size = SaxpyTasks [taskid].team_size ; int leader = SaxpyTasks [taskid].leader ; int my_teamid = taskid - leader ; int64_t pC = Cp [kk] ; if (use_Gustavson) { //-------------------------------------------------------------- // phase5: fine Gustavson task, C=A*B, C<M>=A*B, or C<!M>=A*B //-------------------------------------------------------------- // Hf [i] == 2 if C(i,j) is an entry in C(:,j) int8_t *restrict Hf = (int8_t *restrict) SaxpyTasks [taskid].Hf ; int64_t cjnz = Cp [kk+1] - pC ; int64_t istart, iend ; GB_PARTITION (istart, iend, cvlen, my_teamid, team_size) ; if (cjnz == cvlen) { // C(:,j) is dense for (int64_t i = istart ; i < iend ; i++) { Ci [pC + i] = i ; } // copy Hx [istart:iend-1] into Cx [pC+istart:pC+iend-1] GB_CIJ_MEMCPY (pC + istart, istart, iend - istart) ; } else { // C(:,j) is sparse pC += SaxpyTasks [taskid].my_cjnz ; for (int64_t i = istart ; i < iend ; i++) { if (Hf [i] == 2) { GB_CIJ_GATHER (pC, i) ; // Cx [pC] = Hx [i] Ci [pC++] = i ; } } } } else { //-------------------------------------------------------------- // phase5: fine hash task, C=A*B, C<M>=A*B, C<!M>=A*B //-------------------------------------------------------------- // (Hf [hash] & 3) == 2 if C(i,j) is an entry in C(:,j), // and the index i of the entry is (Hf [hash] >> 2) - 1. 
int64_t *restrict Hf = (int64_t *restrict) SaxpyTasks [taskid].Hf ; int64_t mystart, myend ; GB_PARTITION (mystart, myend, hash_size, my_teamid, team_size) ; pC += SaxpyTasks [taskid].my_cjnz ; for (int64_t hash = mystart ; hash < myend ; hash++) { int64_t hf = Hf [hash] ; if ((hf & 3) == 2) { int64_t i = (hf >> 2) - 1 ; // found C(i,j) in hash Ci [pC] = i ; GB_CIJ_GATHER (pC, hash) ; // Cx [pC] = Hx [hash] pC++ ; } } task_C_jumbled = true ; } } else { //------------------------------------------------------------------ // numeric coarse task: compute C(:,kfirst:klast) //------------------------------------------------------------------ int64_t *restrict Hf = (int64_t *restrict) SaxpyTasks [taskid].Hf ; int64_t kfirst = SaxpyTasks [taskid].start ; int64_t klast = SaxpyTasks [taskid].end ; int64_t nk = klast - kfirst + 1 ; int64_t mark = 2*nk + 1 ; if (use_Gustavson) { //-------------------------------------------------------------- // phase5: coarse Gustavson task //-------------------------------------------------------------- #if ( GB_NO_MASK ) { // phase5: coarse Gustavson task, C=A*B #include "GB_AxB_saxpy3_coarseGus_noM_phase5.c" } #elif ( !GB_MASK_COMP ) { // phase5: coarse Gustavson task, C<M>=A*B #include "GB_AxB_saxpy3_coarseGus_M_phase5.c" } #else { // phase5: coarse Gustavson task, C<!M>=A*B #include "GB_AxB_saxpy3_coarseGus_notM_phase5.c" } #endif } else { //-------------------------------------------------------------- // phase5: coarse hash task //-------------------------------------------------------------- int64_t *restrict Hi = SaxpyTasks [taskid].Hi ; int64_t hash_bits = (hash_size-1) ; #if ( GB_NO_MASK ) { //---------------------------------------------------------- // phase5: coarse hash task, C=A*B //---------------------------------------------------------- // no mask present, or mask ignored (see below) #undef GB_CHECK_MASK_ij #include "GB_AxB_saxpy3_coarseHash_phase5.c" } #elif ( !GB_MASK_COMP ) { //---------------------------------------------------------- // phase5: coarse hash task, C<M>=A*B //---------------------------------------------------------- if (M_in_place) { // M is bitmap/as-if-full, thus not scattered into Hf if (M_is_bitmap && Mask_struct) { // M is bitmap and structural #define GB_MASK_IS_BITMAP_AND_STRUCTURAL #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ if (!Mjb [i]) continue ; #include "GB_AxB_saxpy3_coarseHash_phase5.c" } else { // M is bitmap/dense #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ const int64_t pM = pM_start + i ; \ GB_GET_M_ij (pM) ; \ if (!mij) continue ; #include "GB_AxB_saxpy3_coarseHash_phase5.c" } } else { // M is sparse and scattered into Hf #include "GB_AxB_saxpy3_coarseHash_M_phase5.c" } } #else { //---------------------------------------------------------- // phase5: coarse hash task, C<!M>=A*B //---------------------------------------------------------- if (M_in_place) { // M is bitmap/as-if-full, thus not scattered into Hf if (M_is_bitmap && Mask_struct) { // M is bitmap and structural #define GB_MASK_IS_BITMAP_AND_STRUCTURAL #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ if (Mjb [i]) continue ; #include "GB_AxB_saxpy3_coarseHash_phase5.c" } else { // M is bitmap/dense #undef GB_CHECK_MASK_ij #define GB_CHECK_MASK_ij \ const int64_t pM = pM_start + i ; \ GB_GET_M_ij (pM) ; \ if (mij) continue ; #include "GB_AxB_saxpy3_coarseHash_phase5.c" } } else { // M is sparse and scattered into Hf #include "GB_AxB_saxpy3_coarseHash_notM_phase5.c" } } #endif } } C_jumbled = C_jumbled || task_C_jumbled ; } 
//-------------------------------------------------------------------------- // log the state of C->jumbled //-------------------------------------------------------------------------- C->jumbled = C_jumbled ; // C is jumbled if any task left it jumbled // ttt = omp_get_wtime ( ) - ttt ; // GB_Global_timing_add (12, ttt) ; } #undef GB_NO_MASK #undef GB_MASK_COMP
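For reference, the packing used by the hash tasks above: each 64-bit slot Hf[hash] stores a 1-based row index in the upper bits and a 2-bit state in the lower bits, so the gather phase recovers i as (Hf[hash] >> 2) - 1 when the state is 2. A minimal sketch of that encode/decode (hf_pack, hf_state, hf_index are illustrative names, not part of this template):

/* Sketch of the (h,f) packing described in phase2 above: f (2 bits) is the
 * slot state, h = i+1 is the 1-based row index stored in that slot.
 * f == 0 empty, 1 occupied by M(i,j), 2 occupied by C(i,j), 3 locked. */
#include <stdint.h>

static inline int64_t hf_pack (int64_t i, int f)    /* i is the 0-based row */
{
    return (((i + 1) << 2) | (int64_t) f) ;
}

static inline int hf_state (int64_t hf) { return (int) (hf & 3) ; }

static inline int64_t hf_index (int64_t hf) { return ((hf >> 2) - 1) ; }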
vacation.c
/* ============================================================================= * * vacation.c * * ============================================================================= * * Copyright (C) Stanford University, 2006. All Rights Reserved. * Author: Chi Cao Minh * * ============================================================================= * * For the license of bayes/sort.h and bayes/sort.c, please see the header * of the files. * * ------------------------------------------------------------------------ * * For the license of kmeans, please see kmeans/LICENSE.kmeans * * ------------------------------------------------------------------------ * * For the license of ssca2, please see ssca2/COPYRIGHT * * ------------------------------------------------------------------------ * * For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the * header of the files. * * ------------------------------------------------------------------------ * * For the license of lib/rbtree.h and lib/rbtree.c, please see * lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree * * ------------------------------------------------------------------------ * * Unless otherwise noted, the following license applies to STAMP files: * * Copyright (c) 2007, Stanford University * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of Stanford University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
* * ============================================================================= */ #include <assert.h> #include <stdlib.h> #include <stdio.h> #include <getopt.h> #include "client.h" #include "customer.h" #include "list.h" #include "manager.h" #include "map.h" #include "memory.h" #include "operation.h" #include "random.h" #include "reservation.h" #include "thread.h" #include "timer.h" #include "tm.h" #include "types.h" #include "utility.h" #include "../lib/instrument_roi.h" enum param_types { PARAM_CLIENTS = (unsigned char)'c', PARAM_NUMBER = (unsigned char)'n', PARAM_QUERIES = (unsigned char)'q', PARAM_RELATIONS = (unsigned char)'r', PARAM_TRANSACTIONS = (unsigned char)'t', PARAM_USER = (unsigned char)'u', }; #define PARAM_DEFAULT_CLIENTS (1) #define PARAM_DEFAULT_NUMBER (10) #define PARAM_DEFAULT_QUERIES (90) #define PARAM_DEFAULT_RELATIONS (1 << 16) #define PARAM_DEFAULT_TRANSACTIONS (1 << 26) #define PARAM_DEFAULT_USER (80) double global_params[256]; /* 256 = ascii limit */ /* ============================================================================= * displayUsage * ============================================================================= */ static void displayUsage (const char* appName) { printf("Usage: %s [options]\n", appName); puts("\nOptions: (defaults)\n"); printf(" c <UINT> Number of [c]lients (%i)\n", PARAM_DEFAULT_CLIENTS); printf(" n <UINT> [n]umber of user queries/transaction (%i)\n", PARAM_DEFAULT_NUMBER); printf(" q <UINT> Percentage of relations [q]ueried (%i)\n", PARAM_DEFAULT_QUERIES); printf(" r <UINT> Number of possible [r]elations (%i)\n", PARAM_DEFAULT_RELATIONS); printf(" t <UINT> Number of [t]ransactions (%i)\n", PARAM_DEFAULT_TRANSACTIONS); printf(" u <UINT> Percentage of [u]ser transactions (%i)\n", PARAM_DEFAULT_USER); exit(1); } /* ============================================================================= * setDefaultParams * ============================================================================= */ static void setDefaultParams () { global_params[PARAM_CLIENTS] = PARAM_DEFAULT_CLIENTS; global_params[PARAM_NUMBER] = PARAM_DEFAULT_NUMBER; global_params[PARAM_QUERIES] = PARAM_DEFAULT_QUERIES; global_params[PARAM_RELATIONS] = PARAM_DEFAULT_RELATIONS; global_params[PARAM_TRANSACTIONS] = PARAM_DEFAULT_TRANSACTIONS; global_params[PARAM_USER] = PARAM_DEFAULT_USER; } /* ============================================================================= * parseArgs * ============================================================================= */ static void parseArgs (long argc, char* const argv[]) { long i; long opt; opterr = 0; setDefaultParams(); while ((opt = getopt(argc, argv, "c:n:q:r:t:u:")) != -1) { switch (opt) { case 'c': case 'n': case 'q': case 'r': case 't': case 'u': global_params[(unsigned char)opt] = atol(optarg); break; case '?': default: opterr++; break; } } for (i = optind; i < argc; i++) { fprintf(stderr, "Non-option argument: %s\n", argv[i]); opterr++; } if (opterr) { displayUsage(argv[0]); } } /* ============================================================================= * addCustomer * -- Wrapper function * ============================================================================= */ static bool_t addCustomer (manager_t* managerPtr, long id, long num, long price) { return manager_addCustomer_seq(managerPtr, id); } /* ============================================================================= * initializeManager * ============================================================================= */ static manager_t* initializeManager 
() { manager_t* managerPtr; long i; long numRelation; random_t* randomPtr; long* ids; bool_t (*manager_add[])( manager_t*, long, long, long) = { &manager_addCar_seq, &manager_addFlight_seq, &manager_addRoom_seq, &addCustomer }; long t; long numTable = sizeof(manager_add) / sizeof(manager_add[0]); printf("Initializing manager... "); fflush(stdout); randomPtr = random_alloc(); assert(randomPtr != NULL); numRelation = (long)global_params[PARAM_RELATIONS]; ids = (long*)SEQ_MALLOC(numRelation * sizeof(long)); managerPtr = manager_alloc(numRelation); assert(managerPtr != NULL); for (i = 0; i < numRelation; i++) { ids[i] = i + 1; } for (t = 0; t < numTable; t++) { /* Shuffle ids */ for (i = 0; i < numRelation; i++) { long x = random_generate(randomPtr) % numRelation; long y = random_generate(randomPtr) % numRelation; long tmp = ids[x]; ids[x] = ids[y]; ids[y] = tmp; } /* Populate table */ for (i = 0; i < numRelation; i++) { bool_t status; long id = ids[i]; long num = ((random_generate(randomPtr) % 5) + 1) * 100; long price = ((random_generate(randomPtr) % 5) * 10) + 50; status = manager_add[t](managerPtr, id, num, price); assert(status); } } /* for t */ puts("done."); fflush(stdout); random_free(randomPtr); SEQ_FREE(ids); return managerPtr; } /* ============================================================================= * initializeClients * ============================================================================= */ static client_t** initializeClients (manager_t* managerPtr) { random_t* randomPtr; client_t** clients; long i; long numClient = (long)global_params[PARAM_CLIENTS]; long numTransaction = (long)global_params[PARAM_TRANSACTIONS]; long numTransactionPerClient; long numQueryPerTransaction = (long)global_params[PARAM_NUMBER]; long numRelation = (long)global_params[PARAM_RELATIONS]; long percentQuery = (long)global_params[PARAM_QUERIES]; long queryRange; long percentUser = (long)global_params[PARAM_USER]; printf("Initializing clients... 
"); fflush(stdout); randomPtr = random_alloc(); assert(randomPtr != NULL); clients = (client_t**)SEQ_MALLOC(numClient * sizeof(client_t*)); assert(clients != NULL); numTransactionPerClient = (long)((double)numTransaction / (double)numClient + 0.5); queryRange = (long)((double)percentQuery / 100.0 * (double)numRelation + 0.5); for (i = 0; i < numClient; i++) { clients[i] = client_alloc(i, managerPtr, numTransactionPerClient, numQueryPerTransaction, queryRange, percentUser); assert(clients[i] != NULL); } puts("done."); printf(" Transactions = %li\n", numTransaction); printf(" Clients = %li\n", numClient); printf(" Transactions/client = %li\n", numTransactionPerClient); printf(" Queries/transaction = %li\n", numQueryPerTransaction); printf(" Relations = %li\n", numRelation); printf(" Query percent = %li\n", percentQuery); printf(" Query range = %li\n", queryRange); printf(" Percent user = %li\n", percentUser); fflush(stdout); random_free(randomPtr); return clients; } /* ============================================================================= * checkTables * -- some simple checks (not comprehensive) * -- dependent on tasks generated for clients in initializeClients() * ============================================================================= */ static void checkTables (manager_t* managerPtr) { long i; long numRelation = (long)global_params[PARAM_RELATIONS]; MAP_T* customerTablePtr = managerPtr->customerTablePtr; MAP_T* tables[] = { managerPtr->carTablePtr, managerPtr->flightTablePtr, managerPtr->roomTablePtr, }; long numTable = sizeof(tables) / sizeof(tables[0]); bool_t (*manager_add[])(manager_t*, long, long, long) = { &manager_addCar_seq, &manager_addFlight_seq, &manager_addRoom_seq }; long t; printf("Checking tables... "); fflush(stdout); /* Check for unique customer IDs */ long percentQuery = (long)global_params[PARAM_QUERIES]; long queryRange = (long)((double)percentQuery / 100.0 * (double)numRelation + 0.5); long maxCustomerId = queryRange + 1; for (i = 1; i <= maxCustomerId; i++) { if (MAP_FIND(customerTablePtr, i)) { if (MAP_REMOVE(customerTablePtr, i)) { assert(!MAP_FIND(customerTablePtr, i)); } } } /* Check reservation tables for consistency and unique ids */ for (t = 0; t < numTable; t++) { MAP_T* tablePtr = tables[t]; for (i = 1; i <= numRelation; i++) { if (MAP_FIND(tablePtr, i)) { assert(manager_add[t](managerPtr, i, 0, 0)); /* validate entry */ if (MAP_REMOVE(tablePtr, i)) { assert(!MAP_REMOVE(tablePtr, i)); } } } } puts("done."); fflush(stdout); } /* ============================================================================= * freeClients * ============================================================================= */ static void freeClients (client_t** clients) { long i; long numClient = (long)global_params[PARAM_CLIENTS]; for (i = 0; i < numClient; i++) { client_t* clientPtr = clients[i]; client_free(clientPtr); } } /* ============================================================================= * main * ============================================================================= */ MAIN(argc, argv) { manager_t* managerPtr; client_t** clients; TIMER_T startTime; TIMER_T stopTime; printf("startup\n"); /* Initialization */ parseArgs(argc, (char** const)argv); SIM_GET_NUM_CPU(global_params[PARAM_CLIENTS]); long numThread = global_params[PARAM_CLIENTS]; TM_STARTUP(numThread); P_MEMORY_STARTUP(numThread); TM_THREAD_ENTER(); TM_BEGIN(); //sit_thread::sit_thread_barrier_wait(3); //TM_BEGIN(); managerPtr = initializeManager(); assert(managerPtr != NULL); 
printf("initializing clients\n"); clients = initializeClients(managerPtr); assert(clients != NULL); //TM_END(); // sit_thread::sit_thread_barrier_wait(4); thread_startup(numThread); TM_END(); /* Run transactions */ printf("Running clients...\n "); fflush(stdout); //GOTO_SIM(); //TIMER_READ(start); BEGIN_ROI; #ifdef OTM #pragma omp parallel { client_run(clients); } #else thread_start(client_run, (void*)clients); #endif END_ROI; //TIMER_READ(stop); //GOTO_REAL(); puts("done."); printf("Time = %0.6lf\n", TIMER_DIFF_SECONDS(startTime, stopTime)); fflush(stdout); checkTables(managerPtr); /* Clean up */ printf("Deallocating memory... "); fflush(stdout); freeClients(clients); /* * TODO: The contents of the manager's table need to be deallocated. */ manager_free(managerPtr); puts("done."); fflush(stdout); TM_SHUTDOWN(); P_MEMORY_SHUTDOWN(); thread_shutdown(); MAIN_RETURN(0); } /* ============================================================================= * * End of vacation.c * * ============================================================================= */
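Note that in main() above both TIMER_READ calls are commented out while "Time" is still printed from TIMER_DIFF_SECONDS(startTime, stopTime), so that value is taken from timers that are never read; the region of interest is instead delimited by BEGIN_ROI/END_ROI. A sketch of how the same timer.h macros would typically bracket the measured region, offered as an assumption about intent rather than the benchmark's configured behaviour:

/* Sketch only: bracketing the client run with the timer.h macros already
 * used in this file.  The shipped code leaves these reads disabled and
 * measures the ROI with BEGIN_ROI/END_ROI instead. */
TIMER_T startTime;
TIMER_T stopTime;

TIMER_READ(startTime);
thread_start(client_run, (void*)clients);   /* run all client transactions */
TIMER_READ(stopTime);
printf("Time = %0.6lf\n", TIMER_DIFF_SECONDS(startTime, stopTime));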
convolution_3x3_pack8to1_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_pack8to1_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = 8a-inch/8a-64-outch; kernel_tm_pack8to1.create(8 * inch / 8, 64, outch / 8 + outch % 8, (size_t)2u * 8, 8); int p = 0; for (; p + 7 < outch; p += 8) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); const Mat k4 = kernel_tm.channel(p + 4); const Mat k5 = kernel_tm.channel(p + 5); const Mat k6 = kernel_tm.channel(p + 6); const Mat k7 = kernel_tm.channel(p + 7); Mat g0 = kernel_tm_pack8to1.channel(p / 8); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = (__fp16)k0.row(q + i)[k]; g00[1] = (__fp16)k1.row(q + i)[k]; g00[2] = (__fp16)k2.row(q + i)[k]; g00[3] = (__fp16)k3.row(q + i)[k]; g00[4] = (__fp16)k4.row(q + i)[k]; g00[5] = (__fp16)k5.row(q + i)[k]; g00[6] = (__fp16)k6.row(q + i)[k]; g00[7] = (__fp16)k7.row(q + i)[k]; g00 += 8; } } } } for (; p < outch; p++) { const Mat k0 = kernel_tm.channel(p); Mat g0 = kernel_tm_pack8to1.channel(p / 8 + p % 8); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int q = 0; q + 7 < inch; q += 8) { for (int i = 0; i < 8; i++) { g00[0] = (__fp16)k0.row(q + i)[k]; g00 += 1; } } } } } static void conv3x3s1_winograd64_pack8to1_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; //size_t elemsize = 
bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const __fp16* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); __fp16 tmp[8][8][8]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 8; for (int m = 0; m < 8; m++) { float16x8_t _r00 = vld1q_f16(r0); float16x8_t _r01 = vld1q_f16(r0 + 8); float16x8_t _r02 = vld1q_f16(r0 + 16); float16x8_t _r03 = vld1q_f16(r0 + 24); float16x8_t _r04 = vld1q_f16(r0 + 32); float16x8_t _r05 = vld1q_f16(r0 + 40); float16x8_t _r06 = vld1q_f16(r0 + 48); float16x8_t _r07 = vld1q_f16(r0 + 56); float16x8_t _tmp0m = vfmaq_n_f16(vsubq_f16(_r00, _r06), vsubq_f16(_r04, _r02), 5.25f); float16x8_t _tmp7m = vfmaq_n_f16(vsubq_f16(_r07, _r01), vsubq_f16(_r03, _r05), 5.25f); vst1q_f16(tmp[0][m], _tmp0m); vst1q_f16(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_r02, _r06), _r04, 4.25f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); float16x8_t _tmp1m = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _tmp2m = vsubq_f16(_tmp12a, _tmp12b); vst1q_f16(tmp[1][m], _tmp1m); vst1q_f16(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_r06, _r02, 0.25f), _r04, 1.25f); float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + 
r0[5] * 2); float16x8_t _tmp3m = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _tmp4m = vsubq_f16(_tmp34a, _tmp34b); vst1q_f16(tmp[3][m], _tmp3m); vst1q_f16(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; float16x8_t _tmp56a = vfmaq_n_f16(_r06, vfmsq_n_f16(_r02, _r04, 1.25f), 4.f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); float16x8_t _tmp5m = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _tmp6m = vsubq_f16(_tmp56a, _tmp56b); vst1q_f16(tmp[5][m], _tmp5m); vst1q_f16(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 8; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * 8; __fp16* r0_tm_1 = r0_tm_0 + tiles * 8; __fp16* r0_tm_2 = r0_tm_0 + tiles * 16; __fp16* r0_tm_3 = r0_tm_0 + tiles * 24; __fp16* r0_tm_4 = r0_tm_0 + tiles * 32; __fp16* r0_tm_5 = r0_tm_0 + tiles * 40; __fp16* r0_tm_6 = r0_tm_0 + tiles * 48; __fp16* r0_tm_7 = r0_tm_0 + tiles * 56; for (int m = 0; m < 8; m++) { float16x8_t _tmp00 = vld1q_f16(tmp[m][0]); float16x8_t _tmp01 = vld1q_f16(tmp[m][1]); float16x8_t _tmp02 = vld1q_f16(tmp[m][2]); float16x8_t _tmp03 = vld1q_f16(tmp[m][3]); float16x8_t _tmp04 = vld1q_f16(tmp[m][4]); float16x8_t _tmp05 = vld1q_f16(tmp[m][5]); float16x8_t _tmp06 = vld1q_f16(tmp[m][6]); float16x8_t _tmp07 = vld1q_f16(tmp[m][7]); float16x8_t _r0tm0 = vfmaq_n_f16(vsubq_f16(_tmp00, _tmp06), vsubq_f16(_tmp04, _tmp02), 5.25f); float16x8_t _r0tm7 = vfmaq_n_f16(vsubq_f16(_tmp07, _tmp01), vsubq_f16(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float16x8_t _tmp12a = vfmsq_n_f16(vaddq_f16(_tmp02, _tmp06), _tmp04, 4.25f); float16x8_t _tmp12b = vfmsq_n_f16(vaddq_f16(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); float16x8_t _r0tm1 = vaddq_f16(_tmp12a, _tmp12b); float16x8_t _r0tm2 = vsubq_f16(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; float16x8_t _tmp34a = vfmsq_n_f16(vfmaq_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float16x8_t _tmp34b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); float16x8_t _r0tm3 = vaddq_f16(_tmp34a, _tmp34b); float16x8_t _r0tm4 = vsubq_f16(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; float16x8_t _tmp56a = vfmaq_n_f16(_tmp06, vfmsq_n_f16(_tmp02, _tmp04, 1.25f), 4.f); float16x8_t _tmp56b = vfmaq_n_f16(vfmsq_n_f16(vmulq_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); float16x8_t _r0tm5 = vaddq_f16(_tmp56a, _tmp56b); float16x8_t _r0tm6 = vsubq_f16(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; vst1q_f16(r0_tm_0, _r0tm0); vst1q_f16(r0_tm_1, _r0tm1); vst1q_f16(r0_tm_2, _r0tm2); vst1q_f16(r0_tm_3, _r0tm3); vst1q_f16(r0_tm_4, _r0tm4); vst1q_f16(r0_tm_5, _r0tm5); vst1q_f16(r0_tm_6, _r0tm6); vst1q_f16(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 64; r0_tm_1 += tiles * 64; r0_tm_2 += tiles * 64; r0_tm_3 += tiles * 64; r0_tm_4 += tiles * 64; r0_tm_5 += tiles * 64; r0_tm_6 += tiles * 
64; r0_tm_7 += tiles * 64; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16* tm2p = tm2.row<__fp16>(i / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); r0 += bottom_blob_tm.cstep * 8; } } for (; i + 3 < tiles; i += 4) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x4 asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 2u, 1, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; __fp16* output0_tm = top_blob_tm.channel(p); __fp16* output1_tm = top_blob_tm.channel(p + 1); __fp16* output2_tm = top_blob_tm.channel(p + 2); __fp16* output3_tm = top_blob_tm.channel(p + 3); __fp16* output4_tm = top_blob_tm.channel(p + 4); __fp16* output5_tm = top_blob_tm.channel(p + 5); __fp16* output6_tm = top_blob_tm.channel(p + 6); __fp16* output7_tm = top_blob_tm.channel(p + 7); const Mat kernel01_tm = kernel_tm.channel(p / 8); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = 
kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v28.8h, v16.8h, v0.h[4] \n" "fmla v29.8h, v16.8h, v0.h[5] \n" "fmla v30.8h, v16.8h, v0.h[6] \n" "fmla v31.8h, v16.8h, v0.h[7] \n" "fmla v24.8h, v17.8h, v1.h[0] \n" "fmla v25.8h, v17.8h, v1.h[1] \n" "fmla v26.8h, v17.8h, v1.h[2] \n" "fmla v27.8h, v17.8h, v1.h[3] \n" "fmla v28.8h, v17.8h, v1.h[4] \n" "fmla v29.8h, v17.8h, v1.h[5] \n" "fmla v30.8h, v17.8h, v1.h[6] \n" "fmla v31.8h, v17.8h, v1.h[7] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%9], #64 \n" "fmla v24.8h, v18.8h, v2.h[0] \n" "fmla v25.8h, v18.8h, v2.h[1] \n" "fmla v26.8h, v18.8h, v2.h[2] \n" "fmla v27.8h, v18.8h, v2.h[3] \n" "fmla v28.8h, v18.8h, v2.h[4] \n" "fmla v29.8h, v18.8h, v2.h[5] \n" "fmla v30.8h, v18.8h, v2.h[6] \n" "fmla v31.8h, v18.8h, v2.h[7] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n" "fmla v24.8h, v19.8h, v3.h[0] \n" "fmla v25.8h, v19.8h, v3.h[1] \n" "fmla v26.8h, v19.8h, v3.h[2] \n" "fmla v27.8h, v19.8h, v3.h[3] \n" "fmla v28.8h, v19.8h, v3.h[4] \n" "fmla v29.8h, v19.8h, v3.h[5] \n" "fmla v30.8h, v19.8h, v3.h[6] \n" "fmla v31.8h, v19.8h, v3.h[7] \n" "fmla v24.8h, v20.8h, v4.h[0] \n" "fmla v25.8h, v20.8h, v4.h[1] \n" "fmla v26.8h, v20.8h, v4.h[2] \n" "fmla v27.8h, v20.8h, v4.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v4.h[5] \n" "fmla v30.8h, v20.8h, v4.h[6] \n" "fmla v31.8h, v20.8h, v4.h[7] \n" "fmla v24.8h, v21.8h, v5.h[0] \n" "fmla v25.8h, v21.8h, v5.h[1] \n" "fmla v26.8h, v21.8h, v5.h[2] \n" "fmla v27.8h, v21.8h, v5.h[3] \n" "fmla v28.8h, v21.8h, v5.h[4] \n" "fmla v29.8h, v21.8h, v5.h[5] \n" "fmla v30.8h, v21.8h, v5.h[6] \n" "fmla v31.8h, v21.8h, v5.h[7] \n" "fmla v24.8h, v22.8h, v6.h[0] \n" "fmla v25.8h, v22.8h, v6.h[1] \n" "fmla v26.8h, v22.8h, v6.h[2] \n" "fmla v27.8h, v22.8h, v6.h[3] \n" "fmla v28.8h, v22.8h, v6.h[4] \n" "fmla v29.8h, v22.8h, v6.h[5] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v23.8h, v7.h[0] \n" "fmla v25.8h, v23.8h, v7.h[1] \n" "fmla v26.8h, v23.8h, v7.h[2] \n" "fmla v27.8h, v23.8h, v7.h[3] \n" "fmla v28.8h, v23.8h, v7.h[4] \n" "fmla v29.8h, v23.8h, v7.h[5] \n" "fmla v30.8h, v23.8h, v7.h[6] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "bne 0b \n" "st1 {v24.8h}, [%1], #16 \n" "st1 {v25.8h}, [%2], #16 \n" "st1 {v26.8h}, [%3], #16 \n" "st1 {v27.8h}, [%4], #16 \n" "st1 {v28.8h}, [%5], #16 \n" "st1 {v29.8h}, [%6], #16 \n" "st1 {v30.8h}, [%7], #16 \n" "st1 {v31.8h}, [%8], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), 
"10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%9], #32 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v28.4h, v16.4h, v0.h[4] \n" "fmla v29.4h, v16.4h, v0.h[5] \n" "fmla v30.4h, v16.4h, v0.h[6] \n" "fmla v31.4h, v16.4h, v0.h[7] \n" "fmla v24.4h, v17.4h, v1.h[0] \n" "fmla v25.4h, v17.4h, v1.h[1] \n" "fmla v26.4h, v17.4h, v1.h[2] \n" "fmla v27.4h, v17.4h, v1.h[3] \n" "fmla v28.4h, v17.4h, v1.h[4] \n" "fmla v29.4h, v17.4h, v1.h[5] \n" "fmla v30.4h, v17.4h, v1.h[6] \n" "fmla v31.4h, v17.4h, v1.h[7] \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%9], #32 \n" "fmla v24.4h, v18.4h, v2.h[0] \n" "fmla v25.4h, v18.4h, v2.h[1] \n" "fmla v26.4h, v18.4h, v2.h[2] \n" "fmla v27.4h, v18.4h, v2.h[3] \n" "fmla v28.4h, v18.4h, v2.h[4] \n" "fmla v29.4h, v18.4h, v2.h[5] \n" "fmla v30.4h, v18.4h, v2.h[6] \n" "fmla v31.4h, v18.4h, v2.h[7] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n" "fmla v24.4h, v19.4h, v3.h[0] \n" "fmla v25.4h, v19.4h, v3.h[1] \n" "fmla v26.4h, v19.4h, v3.h[2] \n" "fmla v27.4h, v19.4h, v3.h[3] \n" "fmla v28.4h, v19.4h, v3.h[4] \n" "fmla v29.4h, v19.4h, v3.h[5] \n" "fmla v30.4h, v19.4h, v3.h[6] \n" "fmla v31.4h, v19.4h, v3.h[7] \n" "fmla v24.4h, v20.4h, v4.h[0] \n" "fmla v25.4h, v20.4h, v4.h[1] \n" "fmla v26.4h, v20.4h, v4.h[2] \n" "fmla v27.4h, v20.4h, v4.h[3] \n" "fmla v28.4h, v20.4h, v4.h[4] \n" "fmla v29.4h, v20.4h, v4.h[5] \n" "fmla v30.4h, v20.4h, v4.h[6] \n" "fmla v31.4h, v20.4h, v4.h[7] \n" "fmla v24.4h, v21.4h, v5.h[0] \n" "fmla v25.4h, v21.4h, v5.h[1] \n" "fmla v26.4h, v21.4h, v5.h[2] \n" "fmla v27.4h, v21.4h, v5.h[3] \n" "fmla v28.4h, v21.4h, v5.h[4] \n" "fmla v29.4h, v21.4h, v5.h[5] \n" "fmla v30.4h, v21.4h, v5.h[6] \n" "fmla v31.4h, v21.4h, v5.h[7] \n" "fmla v24.4h, v22.4h, v6.h[0] \n" "fmla v25.4h, v22.4h, v6.h[1] \n" "fmla v26.4h, v22.4h, v6.h[2] \n" "fmla v27.4h, v22.4h, v6.h[3] \n" "fmla v28.4h, v22.4h, v6.h[4] \n" "fmla v29.4h, v22.4h, v6.h[5] \n" "fmla v30.4h, v22.4h, v6.h[6] \n" "fmla v31.4h, v22.4h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v23.4h, v7.h[0] \n" "fmla v25.4h, v23.4h, v7.h[1] \n" "fmla v26.4h, v23.4h, v7.h[2] \n" "fmla v27.4h, v23.4h, v7.h[3] \n" "fmla v28.4h, v23.4h, v7.h[4] \n" "fmla v29.4h, v23.4h, v7.h[5] \n" "fmla v30.4h, v23.4h, v7.h[6] \n" "fmla v31.4h, v23.4h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h}, [%1], #8 \n" "st1 {v25.4h}, [%2], #8 \n" "st1 {v26.4h}, [%3], #8 \n" "st1 {v27.4h}, [%4], #8 \n" "st1 {v28.4h}, [%5], #8 \n" "st1 {v29.4h}, [%6], #8 \n" "st1 {v30.4h}, [%7], #8 \n" "st1 {v31.4h}, [%8], #8 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 
"=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v30.16b, v30.16b, v30.16b \n" "0: \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v0.8h}, [%9], #16 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%10], #64 \n" "fmla v30.8h, v16.8h, v0.h[0] \n" "fmla v30.8h, v17.8h, v0.h[1] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%10], #64 \n" "fmla v30.8h, v18.8h, v0.h[2] \n" "fmla v30.8h, v19.8h, v0.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v30.8h, v20.8h, v0.h[4] \n" "fmla v30.8h, v21.8h, v0.h[5] \n" "fmla v30.8h, v22.8h, v0.h[6] \n" "fmla v30.8h, v23.8h, v0.h[7] \n" "bne 0b \n" "st1 {v30.h}[0], [%1], #2 \n" "st1 {v30.h}[1], [%2], #2 \n" "st1 {v30.h}[2], [%3], #2 \n" "st1 {v30.h}[3], [%4], #2 \n" "st1 {v30.h}[4], [%5], #2 \n" "st1 {v30.h}[5], [%6], #2 \n" "st1 {v30.h}[6], [%7], #2 \n" "st1 {v30.h}[7], [%8], #2 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30"); } } } remain_outch_start += nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 8 + p % 8); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v30.16b, v30.16b, v30.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.8h}, [%3], #16 \n" "fmla v30.8h, v16.8h, v0.h[0] \n" "fmla v30.8h, v17.8h, v0.h[1] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%2], #64 \n" "fmla v30.8h, v18.8h, v0.h[2] \n" "fmla v30.8h, v19.8h, v0.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v30.8h, v20.8h, v0.h[4] \n" "fmla v30.8h, v21.8h, v0.h[5] \n" "fmla v30.8h, v22.8h, v0.h[6] \n" "fmla v30.8h, v23.8h, v0.h[7] \n" "bne 0b \n" "st1 {v30.8h}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm 
volatile( "eor v30.16b, v30.16b, v30.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.8h}, [%3], #16 \n" "fmla v30.4h, v16.4h, v0.h[0] \n" "fmla v30.4h, v17.4h, v0.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" "fmla v30.4h, v18.4h, v0.h[2] \n" "fmla v30.4h, v19.4h, v0.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v30.4h, v20.4h, v0.h[4] \n" "fmla v30.4h, v21.4h, v0.h[5] \n" "fmla v30.4h, v22.4h, v0.h[6] \n" "fmla v30.4h, v23.4h, v0.h[7] \n" "bne 0b \n" "st1 {v30.4h}, [%1], #8 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); float16x8_t _sum0 = vdupq_n_f16((__fp16)0.f); for (int q = 0; q < inch; q++) { float16x8_t _r0 = vld1q_f16(r0); float16x8_t _k0 = vld1q_f16(kptr); _sum0 = vfmaq_f16(_sum0, _r0, _k0); kptr += 4; r0 += 4; } __fp16 sum0 = vaddvq_f32(vcvt_f32_f16(vadd_f16(vget_low_f16(_sum0), vget_high_f16(_sum0)))); output0_tm[0] = sum0; output0_tm++; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 2u, 1, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const __fp16 bias0 = bias ? 
bias[p] : 0.f; // float32x2_t _bias0 = vdup_n_f32(bias0); __fp16 tmp[6][8]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, 4u, 1, opt.workspace_allocator); const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * 1; const __fp16* output0_tm_1 = output0_tm_0 + tiles * 1; const __fp16* output0_tm_2 = output0_tm_0 + tiles * 2; const __fp16* output0_tm_3 = output0_tm_0 + tiles * 3; const __fp16* output0_tm_4 = output0_tm_0 + tiles * 4; const __fp16* output0_tm_5 = output0_tm_0 + tiles * 5; const __fp16* output0_tm_6 = output0_tm_0 + tiles * 6; const __fp16* output0_tm_7 = output0_tm_0 + tiles * 7; // TODO neon optimize for (int m = 0; m < 8; m++) { __fp16 tmp024a = output0_tm_1[0] + output0_tm_2[0]; __fp16 tmp135a = output0_tm_1[0] - output0_tm_2[0]; __fp16 tmp024b = output0_tm_3[0] + output0_tm_4[0]; __fp16 tmp135b = output0_tm_3[0] - output0_tm_4[0]; __fp16 tmp024c = output0_tm_5[0] + output0_tm_6[0]; __fp16 tmp135c = output0_tm_5[0] - output0_tm_6[0]; tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 8; output0_tm_1 += tiles * 8; output0_tm_2 += tiles * 8; output0_tm_3 += tiles * 8; output0_tm_4 += tiles * 8; output0_tm_5 += tiles * 8; output0_tm_6 += tiles * 8; output0_tm_7 += tiles * 8; } __fp16* output0 = out0.row<__fp16>(i * 6) + j * 6; for (int m = 0; m < 6; m++) { const __fp16* tmp0 = tmp[m]; __fp16 tmp024a = tmp0[1] + tmp0[2]; __fp16 tmp135a = tmp0[1] - tmp0[2]; __fp16 tmp024b = tmp0[3] + tmp0[4]; __fp16 tmp135b = tmp0[3] - tmp0[4]; __fp16 tmp024c = tmp0[5] + tmp0[6]; __fp16 tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
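// Illustrative note (not part of the original ncnn source): the NEON output
// transform above implements the otm[6][8] matrix listed in the comments.
// A minimal scalar sketch of the same 1-D Winograd F(6,3) output transform,
// collapsing eight transform-domain values r[0..7] into six spatial outputs,
// is given below for reference only; the function name is made up here.
static inline void winograd63_output_transform_1d_ref(const float r[8], float out[6])
{
    // pairwise sums/differences reused by several outputs, mirroring the comments
    float a = r[1] + r[2], b = r[1] - r[2];
    float c = r[3] + r[4], d = r[3] - r[4];
    float e = r[5] + r[6], f = r[5] - r[6];
    out[0] = r[0] + a + c + e * 32.f;
    out[1] = b + d * 2.f + f * 16.f;
    out[2] = a + c * 4.f + e * 8.f;
    out[3] = b + d * 8.f + f * 4.f;
    out[4] = a + c * 16.f + e * 2.f;
    out[5] = r[7] + b + d * 32.f + f;
}
// The vectorized code above applies this transform row-wise and then column-wise
// on each 8x8 tile and adds the per-channel bias afterwards.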
nt2_fmt_plug.c
/* * NT-ng format, using intrinsics. * * This software is Copyright 2011, 2012 magnum, and it is hereby released to * the general public under the following terms: Redistribution and use in * source and binary forms, with or without modification, are permitted. * * Losely based on rawSHA1, by bartavelle * and is also using his mmx/sse2/simd-intrinsics functions * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_NT2; #elif FMT_REGISTERS_H john_register_one(&fmt_NT2); #else #include <string.h> #include "arch.h" //#undef SIMD_COEF_32 //#undef SIMD_PARA_MD4 /* * Only effective for SIMD. * Undef to disable reversing steps for benchmarking. */ #define REVERSE_STEPS #ifdef SIMD_COEF_32 #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_MD4) #endif #include "md4.h" #include "misc.h" #include "common.h" #include "formats.h" #include "options.h" #include "unicode.h" #include "memory.h" #include "johnswap.h" #include "simd-intrinsics.h" #include "memdbg.h" #define FORMAT_LABEL "NT" #define FORMAT_NAME "" #define ALGORITHM_NAME "MD4 " MD4_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define CIPHERTEXT_LENGTH 32 #define FORMAT_TAG "$NT$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define DIGEST_SIZE 16 #define BINARY_SIZE DIGEST_SIZE #define BINARY_ALIGN 4 #define SALT_SIZE 0 #define SALT_ALIGN 1 #if !FAST_FORMATS_OMP #undef _OPENMP #endif #ifdef SIMD_COEF_32 #if defined(_OPENMP) #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 512 // tuned for i7 w/o HT #endif #endif #define PLAINTEXT_LENGTH 27 #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + ((i)&3) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32*4 ) #else #define PLAINTEXT_LENGTH 125 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static char *source(char *source, void *binary) { static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1] = FORMAT_TAG; uint32_t b[4]; char *p; int i, j; memcpy(b, binary, sizeof(b)); #if SIMD_COEF_32 && defined(REVERSE_STEPS) md4_unreverse(b); #endif #if ARCH_LITTLE_ENDIAN==0 alter_endianity(b, 16); #endif p = &out[TAG_LENGTH]; for (i = 0; i < 4; i++) for (j = 0; j < 8; j++) *p++ = itoa16[(b[i] >> ((j ^ 1) * 4)) & 0xf]; return out; } #ifdef SIMD_COEF_32 static unsigned char (*saved_key); static unsigned char (*crypt_key); static unsigned int (**buf_ptr); #else static MD4_CTX ctx; static int saved_len; static UTF16 saved_key[PLAINTEXT_LENGTH + 1]; static uint32_t crypt_key[DIGEST_SIZE / 4]; #endif // Note: the ISO-8859-1 plaintexts will be replaced in init() if running UTF-8 static struct fmt_tests tests[] = { {"b7e4b9022cd45f275334bbdb83bb5be5", "John the Ripper"}, {"$NT$31d6cfe0d16ae931b73c59d7e0c089c0", ""}, {"$NT$31d6cfe0d16ae931b73c59d7e0c089c0", ""}, {"$NT$31d6cfe0d16ae931b73c59d7e0c089c0", ""}, {"$NT$31d6cfe0d16ae931b73c59d7e0c089c0", ""}, {"$NT$31d6cfe0d16ae931b73c59d7e0c089c0", ""}, {"$NT$7a21990fcd3d759941e45c490f143d5f", "12345"}, {"$NT$f9e37e83b83c47a93c2f09f66408631b", "abc123"}, {"$NT$8846f7eaee8fb117ad06bdd830b7586c", "password"}, {"$NT$2b2ac2d1c7c8fda6cea80b5fad7563aa", "computer"}, {"$NT$32ed87bdb5fdc5e9cba88547376818d4", "123456"}, {"$NT$b7e0ea9fbffcf6dd83086e905089effd", "tigger"}, {"$NT$7ce21f17c0aee7fb9ceba532d0546ad6", "1234"}, {"$NT$b23a90d0aad9da3615fafc27a1b8baeb", "a1b2c3"}, {"$NT$2d20d252a479f485cdf5e171d93985bf", "qwerty"}, {"$NT$3dbde697d71690a769204beb12283678", "123"}, {"$NT$c889c75b7c1aae1f7150c5681136e70e", "xxx"}, 
{"$NT$d5173c778e0f56d9fc47e3b3c829aca7", "money"}, {"$NT$0cb6948805f797bf2a82807973b89537", "test"}, {"$NT$0569fcf2b14b9c7f3d3b5f080cbd85e5", "carmen"}, {"$NT$f09ab1733a528f430353834152c8a90e", "mickey"}, {"$NT$878d8014606cda29677a44efa1353fc7", "secret"}, {"$NT$85ac333bbfcbaa62ba9f8afb76f06268", "summer"}, {"$NT$5962cc080506d90be8943118f968e164", "internet"}, {"$NT$f07206c3869bda5acd38a3d923a95d2a", "service"}, {"$NT$d0dfc65e8f286ef82f6b172789a0ae1c", "canada"}, {"$NT$066ddfd4ef0e9cd7c256fe77191ef43c", "hello"}, {"$NT$39b8620e745b8aa4d1108e22f74f29e2", "ranger"}, {"$NT$8d4ef8654a9adc66d4f628e94f66e31b", "shadow"}, {"$NT$320a78179516c385e35a93ffa0b1c4ac", "baseball"}, {"$NT$e533d171ac592a4e70498a58b854717c", "donald"}, {"$NT$5eee54ce19b97c11fd02e531dd268b4c", "harley"}, {"$NT$6241f038703cbfb7cc837e3ee04f0f6b", "hockey"}, {"$NT$becedb42ec3c5c7f965255338be4453c", "letmein"}, {"$NT$ec2c9f3346af1fb8e4ee94f286bac5ad", "maggie"}, {"$NT$f5794cbd75cf43d1eb21fad565c7e21c", "mike"}, {"$NT$74ed32086b1317b742c3a92148df1019", "mustang"}, {"$NT$63af6e1f1dd9ecd82f17d37881cb92e6", "snoopy"}, {"$NT$58def5844fe58e8f26a65fff9deb3827", "buster"}, {"$NT$f7eb9c06fafaa23c4bcf22ba6781c1e2", "dragon"}, {"$NT$dd555241a4321657e8b827a40b67dd4a", "jordan"}, {"$NT$bb53a477af18526ada697ce2e51f76b3", "michael"}, {"$NT$92b7b06bb313bf666640c5a1e75e0c18", "michelle"}, {NULL} }; static void set_key_utf8(char *_key, int index); static void set_key_CP(char *_key, int index); static void init(struct fmt_main *self) { #if SIMD_COEF_32 int i; #endif #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif if (options.target_enc == UTF_8) { /* This avoids an if clause for every set_key */ self->methods.set_key = set_key_utf8; #if SIMD_COEF_32 /* kick it up from 27. 
We will truncate in setkey_utf8() */ self->params.plaintext_length = 3 * PLAINTEXT_LENGTH; #endif tests[1].plaintext = "\xC3\xBC"; // German u-umlaut in UTF-8 tests[1].ciphertext = "$NT$8bd6e4fb88e01009818749c5443ea712"; tests[2].plaintext = "\xC3\xBC\xC3\xBC"; // two of them tests[2].ciphertext = "$NT$cc1260adb6985ca749f150c7e0b22063"; tests[3].plaintext = "\xE2\x82\xAC"; // euro sign tests[3].ciphertext = "$NT$030926b781938db4365d46adc7cfbcb8"; tests[4].plaintext = "\xE2\x82\xAC\xE2\x82\xAC"; tests[4].ciphertext = "$NT$682467b963bb4e61943e170a04f7db46"; } else { if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) { /* This avoids an if clause for every set_key */ self->methods.set_key = set_key_CP; } if (CP_to_Unicode[0xfc] == 0x00fc) { tests[1].plaintext = "\xFC"; // German u-umlaut in UTF-8 tests[1].ciphertext = "$NT$8bd6e4fb88e01009818749c5443ea712"; tests[2].plaintext = "\xFC\xFC"; // two of them tests[2].ciphertext = "$NT$cc1260adb6985ca749f150c7e0b22063"; tests[3].plaintext = "\xFC\xFC\xFC"; // 3 of them tests[3].ciphertext = "$NT$2e583e8c210fb101994c19877ac53b89"; tests[4].plaintext = "\xFC\xFC\xFC\xFC"; tests[4].ciphertext = "$NT$243bb98e7704797f92b1dd7ded6da0d0"; } } #if SIMD_COEF_32 saved_key = mem_calloc_align(64 * self->params.max_keys_per_crypt, sizeof(*saved_key), MEM_ALIGN_SIMD); crypt_key = mem_calloc_align(DIGEST_SIZE * self->params.max_keys_per_crypt, sizeof(*crypt_key), MEM_ALIGN_SIMD); buf_ptr = mem_calloc(self->params.max_keys_per_crypt, sizeof(*buf_ptr)); for (i=0; i<self->params.max_keys_per_crypt; i++) buf_ptr[i] = (unsigned int*)&saved_key[GETPOS(0, i)]; #endif } static void done(void) { #if SIMD_COEF_32 MEM_FREE(buf_ptr); MEM_FREE(crypt_key); MEM_FREE(saved_key); #endif } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[37]; if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) ciphertext += TAG_LENGTH; memcpy(out, FORMAT_TAG, TAG_LENGTH); memcpy(&out[TAG_LENGTH], ciphertext, 32); out[36] = 0; strlwr(&out[TAG_LENGTH]); return out; } static int valid(char *ciphertext, struct fmt_main *self) { char *pos; if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) ciphertext += TAG_LENGTH; for (pos = ciphertext; atoi16[ARCH_INDEX(*pos)] != 0x7F; pos++); if (!*pos && pos - ciphertext == CIPHERTEXT_LENGTH) return 1; else return 0; } // here to 'handle' the pwdump files: user:uid:lmhash:ntlmhash::: // Note, we address the user id inside loader. 
static char *prepare(char *split_fields[10], struct fmt_main *self) { static char out[33 + TAG_LENGTH + 1]; if (!valid(split_fields[1], self)) { if (split_fields[3] && strlen(split_fields[3]) == 32) { sprintf(out, "%s%s", FORMAT_TAG, split_fields[3]); if (valid(out, self)) return out; } } return split_fields[1]; } static void *get_binary(char *ciphertext) { static union { unsigned long dummy; unsigned int i[DIGEST_SIZE/sizeof(unsigned int)]; } _out; unsigned int *out = _out.i; unsigned int i; unsigned int temp; ciphertext += TAG_LENGTH; for (i=0; i<4; i++) { temp = ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+0])]))<<4; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+1])])); temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+2])]))<<12; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+3])]))<<8; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+4])]))<<20; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+5])]))<<16; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+6])]))<<28; temp |= ((unsigned int)(atoi16[ARCH_INDEX(ciphertext[i*8+7])]))<<24; #if ARCH_LITTLE_ENDIAN out[i]=temp; #else out[i]=JOHNSWAP(temp); #endif } #if SIMD_COEF_32 && defined(REVERSE_STEPS) md4_reverse(out); #endif //dump_stuff_msg("\nbinary", out, 16); return out; } // ISO-8859-1 to UCS-2, directly into vector key buffer static void set_key(char *_key, int index) { #ifdef SIMD_COEF_32 const unsigned char *key = (unsigned char*)_key; unsigned int *keybuf_word = buf_ptr[index]; unsigned int len, temp2; len = 0; while((temp2 = *key++)) { unsigned int temp; if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1) { temp2 |= (temp << 16); *keybuf_word = temp2; } else { temp2 |= (0x80 << 16); *keybuf_word = temp2; len++; goto key_cleaning; } len += 2; keybuf_word += SIMD_COEF_32; } *keybuf_word = 0x80; key_cleaning: keybuf_word += SIMD_COEF_32; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } ((unsigned int *)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4; #else #if ARCH_LITTLE_ENDIAN UTF8 *s = (UTF8*)_key; UTF16 *d = saved_key; while (*s) *d++ = *s++; *d = 0; saved_len = (int)((char*)d - (char*)saved_key); #else UTF8 *s = (UTF8*)_key; UTF8 *d = (UTF8*)saved_key; while (*s) { *d++ = *s++; ++d; } *d = 0; saved_len = (int)((char*)d - (char*)saved_key); #endif // dump_stuff_msg(_key, saved_key, 24); #endif } // Legacy codepage to UCS-2, directly into vector key buffer static void set_key_CP(char *_key, int index) { #ifdef SIMD_COEF_32 const unsigned char *key = (unsigned char*)_key; unsigned int *keybuf_word = buf_ptr[index]; unsigned int len, temp2; len = 0; while((temp2 = *key++)) { unsigned int temp; temp2 = CP_to_Unicode[temp2]; if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1) { temp = CP_to_Unicode[temp]; temp2 |= (temp << 16); *keybuf_word = temp2; } else { temp2 |= (0x80 << 16); *keybuf_word = temp2; len++; goto key_cleaning_enc; } len += 2; keybuf_word += SIMD_COEF_32; } *keybuf_word = 0x80; key_cleaning_enc: keybuf_word += SIMD_COEF_32; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } ((unsigned int *)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4; #else saved_len = enc_to_utf16((UTF16*)&saved_key, PLAINTEXT_LENGTH + 1, (unsigned char*)_key, strlen(_key)) << 1; if (saved_len < 0) saved_len = strlen16(saved_key); #endif } // UTF-8 to UCS-2, directly into vector key buffer static void 
set_key_utf8(char *_key, int index) { #ifdef SIMD_COEF_32 const UTF8 *source = (UTF8*)_key; unsigned int *keybuf_word = buf_ptr[index]; UTF32 chl, chh = 0x80; unsigned int len = 0; while (*source) { chl = *source; if (chl >= 0xC0) { unsigned int extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f]; switch (extraBytesToRead) { #if NT_FULL_UNICODE case 3: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; #endif case 2: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; case 1: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; case 0: break; default: goto bailout; } chl -= offsetsFromUTF8[extraBytesToRead]; } source++; len++; #if NT_FULL_UNICODE if (chl > UNI_MAX_BMP) { if (len == PLAINTEXT_LENGTH) { chh = 0x80; *keybuf_word = (chh << 16) | chl; keybuf_word += SIMD_COEF_32; break; } #define halfBase 0x0010000UL #define halfShift 10 #define halfMask 0x3FFUL #define UNI_SUR_HIGH_START (UTF32)0xD800 #define UNI_SUR_LOW_START (UTF32)0xDC00 chl -= halfBase; chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);; chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START); len++; } else #endif if (*source && len < PLAINTEXT_LENGTH) { chh = *source; if (chh >= 0xC0) { unsigned int extraBytesToRead = opt_trailingBytesUTF8[chh & 0x3f]; switch (extraBytesToRead) { #if NT_FULL_UNICODE case 3: ++source; if (*source) { chl <<= 6; chl += *source; } else goto bailout; #endif case 2: ++source; if (*source) { chh <<= 6; chh += *source; } else goto bailout; case 1: ++source; if (*source) { chh <<= 6; chh += *source; } else goto bailout; case 0: break; default: goto bailout; } chh -= offsetsFromUTF8[extraBytesToRead]; } source++; len++; } else { chh = 0x80; *keybuf_word = (chh << 16) | chl; keybuf_word += SIMD_COEF_32; break; } *keybuf_word = (chh << 16) | chl; keybuf_word += SIMD_COEF_32; } if (chh != 0x80 || len == 0) { *keybuf_word = 0x80; keybuf_word += SIMD_COEF_32; } bailout: while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } ((unsigned int *)saved_key)[14*SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32*16*SIMD_COEF_32] = len << 4; #else saved_len = utf8_to_utf16((UTF16*)&saved_key, PLAINTEXT_LENGTH + 1, (unsigned char*)_key, strlen(_key)) << 1; if (saved_len < 0) saved_len = strlen16(saved_key); #endif } static char *get_key(int index) { #ifdef SIMD_COEF_32 // Get the key back from the key buffer, from UCS-2 unsigned int *keybuffer = (unsigned int*)&saved_key[GETPOS(0, index)]; static UTF16 key[PLAINTEXT_LENGTH + 1]; unsigned int md4_size=0; unsigned int i=0; for (; md4_size < PLAINTEXT_LENGTH; i += SIMD_COEF_32, md4_size++) { key[md4_size] = keybuffer[i]; key[md4_size+1] = keybuffer[i] >> 16; if (key[md4_size] == 0x80 && key[md4_size+1] == 0) { key[md4_size] = 0; break; } ++md4_size; if (key[md4_size] == 0x80 && ((keybuffer[i+SIMD_COEF_32]&0xFFFF) == 0 || md4_size == PLAINTEXT_LENGTH)) { key[md4_size] = 0; break; } } return (char*)utf16_to_enc(key); #else return (char*)utf16_to_enc(saved_key); #endif } #ifndef REVERSE_STEPS #undef SSEi_REVERSE_STEPS #define SSEi_REVERSE_STEPS 0 #endif static int crypt_all(int *pcount, struct db_salt *salt) { #ifdef SIMD_COEF_32 int i = 0; #ifdef _OPENMP const unsigned int count = (*pcount + NBKEYS - 1) / NBKEYS; #pragma omp parallel for for (i = 0; i < count; i++) #endif SIMDmd4body(&saved_key[i*NBKEYS*64], (unsigned int*)&crypt_key[i*NBKEYS*DIGEST_SIZE], NULL, SSEi_REVERSE_STEPS | SSEi_MIXED_IN); #else MD4_Init( &ctx ); MD4_Update(&ctx, (unsigned char*)saved_key, 
saved_len); MD4_Final((unsigned char*) crypt_key, &ctx); #endif return *pcount; } static int cmp_all(void *binary, int count) { #ifdef SIMD_COEF_32 unsigned int x, y; #ifdef _OPENMP const unsigned int c = (count + SIMD_COEF_32 - 1) / SIMD_COEF_32; #else const unsigned int c = SIMD_PARA_MD4; #endif for (y = 0; y < c; y++) for (x = 0; x < SIMD_COEF_32; x++) { if ( ((uint32_t*)binary)[1] == ((uint32_t*)crypt_key)[y*SIMD_COEF_32*4+x+SIMD_COEF_32] ) return 1; } return 0; #else return !memcmp(binary, crypt_key, BINARY_SIZE); #endif } static int cmp_one(void *binary, int index) { #ifdef SIMD_COEF_32 unsigned int x = index&(SIMD_COEF_32-1); unsigned int y = (unsigned int)index/SIMD_COEF_32; return ((uint32_t*)binary)[1] == ((uint32_t*)crypt_key)[x+y*SIMD_COEF_32*4+SIMD_COEF_32]; #else return !memcmp(binary, crypt_key, BINARY_SIZE); #endif } static int cmp_exact(char *source, int index) { #ifdef SIMD_COEF_32 uint32_t crypt_key[DIGEST_SIZE / 4]; UTF16 u16[PLAINTEXT_LENGTH + 1]; MD4_CTX ctx; UTF8 *key = (UTF8*)get_key(index); int len = enc_to_utf16(u16, PLAINTEXT_LENGTH, key, strlen((char*)key)); if (len <= 0) len = strlen16(u16); MD4_Init(&ctx); MD4_Update(&ctx, u16, len << 1); MD4_Final((void*)crypt_key, &ctx); #ifdef REVERSE_STEPS md4_reverse(crypt_key); #endif return !memcmp(get_binary(source), crypt_key, DIGEST_SIZE); #else return 1; #endif } #ifdef SIMD_COEF_32 #define SIMD_INDEX (index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*SIMD_COEF_32*4+SIMD_COEF_32 static int get_hash_0(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_0; } static int get_hash_1(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_1; } static int get_hash_2(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_2; } static int get_hash_3(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_3; } static int get_hash_4(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_4; } static int get_hash_5(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_5; } static int get_hash_6(int index) { return ((uint32_t*)crypt_key)[SIMD_INDEX] & PH_MASK_6; } #else static int get_hash_0(int index) { return ((uint32_t*)crypt_key)[1] & PH_MASK_0; } static int get_hash_1(int index) { return ((uint32_t*)crypt_key)[1] & PH_MASK_1; } static int get_hash_2(int index) { return ((uint32_t*)crypt_key)[1] & PH_MASK_2; } static int get_hash_3(int index) { return ((uint32_t*)crypt_key)[1] & PH_MASK_3; } static int get_hash_4(int index) { return ((uint32_t*)crypt_key)[1] & PH_MASK_4; } static int get_hash_5(int index) { return ((uint32_t*)crypt_key)[1] & PH_MASK_5; } static int get_hash_6(int index) { return ((uint32_t*)crypt_key)[1] & PH_MASK_6; } #endif static int binary_hash_0(void * binary) { return ((uint32_t*)binary)[1] & PH_MASK_0; } static int binary_hash_1(void * binary) { return ((uint32_t*)binary)[1] & PH_MASK_1; } static int binary_hash_2(void * binary) { return ((uint32_t*)binary)[1] & PH_MASK_2; } static int binary_hash_3(void * binary) { return ((uint32_t*)binary)[1] & PH_MASK_3; } static int binary_hash_4(void * binary) { return ((uint32_t*)binary)[1] & PH_MASK_4; } static int binary_hash_5(void * binary) { return ((uint32_t*)binary)[1] & PH_MASK_5; } static int binary_hash_6(void * binary) { return ((uint32_t*)binary)[1] & PH_MASK_6; } struct fmt_main fmt_NT2 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, 
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		source,
		{
			binary_hash_0,
			binary_hash_1,
			binary_hash_2,
			binary_hash_3,
			binary_hash_4,
			binary_hash_5,
			binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
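/*
 * Reference-only sketch (not part of this plugin): the NT hash computed by the
 * SIMD and scalar paths above is simply MD4 over the password encoded as
 * UTF-16LE, which is also what cmp_exact() recomputes with the md4.h API
 * (MD4_Init / MD4_Update / MD4_Final).  The helper name and the ASCII-only
 * simplification below are assumptions for illustration; the plugin itself
 * also handles legacy codepages and UTF-8.  Kept inside #if 0 so it has no
 * effect on compilation.
 */
#if 0
static void nt_hash_ref(const char *ascii_password, unsigned char digest[16])
{
	MD4_CTX ctx;
	unsigned char utf16le[2 * 125];
	size_t i, len = strlen(ascii_password);

	if (len > 125)
		len = 125;
	for (i = 0; i < len; i++) {
		utf16le[2 * i] = (unsigned char)ascii_password[i]; /* ASCII -> UTF-16LE */
		utf16le[2 * i + 1] = 0;
	}
	MD4_Init(&ctx);
	MD4_Update(&ctx, utf16le, 2 * len);
	MD4_Final(digest, &ctx);
}
#endif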
strm.c
/*-----------------------------------------------------------------------*/ /* Program: STREAM */ /* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2013: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear, and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ #include <stdio.h> #include <unistd.h> #include <math.h> #include <float.h> #include <limits.h> /*#include <sys/time.h>*/ // Example main arguments // #define MARGS "" #include "lime.h" /*----------------------------------------------------------------------- * INSTRUCTIONS: * * 1) STREAM requires different amounts of memory to run on different * systems, depending on both the system cache size(s) and the * granularity of the system timer. * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) * to meet *both* of the following criteria: * (a) Each array must be at least 4 times the size of the * available cache memory. I don't worry about the difference * between 10^6 and 2^20, so in practice the minimum array size * is about 3.8 times the cache size. * Example 1: One Xeon E3 with 8 MB L3 cache * STREAM_ARRAY_SIZE should be >= 4 million, giving * an array size of 30.5 MB and a total memory requirement * of 91.5 MB. * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) * STREAM_ARRAY_SIZE should be >= 20 million, giving * an array size of 153 MB and a total memory requirement * of 458 MB. * (b) The size should be large enough so that the 'timing calibration' * output by the program is at least 20 clock-ticks. * Example: most versions of Windows have a 10 millisecond timer * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. 
* If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. * This means the each array must be at least 1 GB, or 128M elements. * * Version 5.10 increases the default array size from 2 million * elements to 10 million elements in response to the increasing * size of L3 caches. The new default size is large enough for caches * up to 20 MB. * Version 5.10 changes the loop index variables from "register int" * to "ssize_t", which allows array indices >2^32 (4 billion) * on properly configured 64-bit systems. Additional compiler options * (such as "-mcmodel=medium") may be required for large memory runs. * * Array size can be set at compile time without modifying the source * code for the (many) compilers that support preprocessor definitions * on the compile line. E.g., * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M * will override the default size of 10M with a new size of 100M elements * per array. */ #ifndef STREAM_ARRAY_SIZE #define STREAM_ARRAY_SIZE 10000000 #endif /* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result * for any iteration after the first, therefore the minimum value * for NTIMES is 2. * There are no rules on maximum allowable values for NTIMES, but * values larger than the default are unlikely to noticeably * increase the reported performance. * NTIMES can also be set on the compile line without changing the source * code using, for example, "-DNTIMES=7". */ #ifdef NTIMES #if NTIMES<=1 #define NTIMES 10 #endif #endif #ifndef NTIMES #define NTIMES 10 #endif /* Users are allowed to modify the "OFFSET" variable, which *may* change the * relative alignment of the arrays (though compilers may change the * effective offset by making the arrays non-contiguous on some systems). * Use of non-zero values for OFFSET can be especially helpful if the * STREAM_ARRAY_SIZE is set to a value close to a large power of 2. * OFFSET can also be set on the compile line without changing the source * code using, for example, "-DOFFSET=56". */ #ifndef OFFSET #define OFFSET 0 #endif /* * 3) Compile the code with optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. * If the results are unreasonably good, on the other hand, the * optimizer might be too smart for me! * * For a simple single-core version, try compiling with: * cc -O stream.c -o stream * This is known to work on many, many systems.... * * To use multiple cores, you need to tell the compiler to obey the OpenMP * directives in the code. This varies by compiler, but a common example is * gcc -O -fopenmp stream.c -o stream_omp * The environment variable OMP_NUM_THREADS allows runtime control of the * number of threads/cores used when the resulting "stream_omp" program * is executed. * * To run with single-precision variables and arithmetic, simply add * -DSTREAM_TYPE=float * to the compile line. * Note that this changes the minimum array sizes required --- see (1) above. * * The preprocessor directive "TUNED" does not do much -- it simply causes the * code to call separate functions to execute each kernel. Trivial versions * of these functions are provided, but they are *not* tuned -- they just * provide predefined interfaces to be replaced with tuned code. 
* * * 4) Optional: Mail the results to [email protected] * Be sure to include info that will help me understand: * a) the computer hardware configuration (e.g., processor model, memory type) * b) the compiler name/version and compilation flags * c) any run-time information (such as OMP_NUM_THREADS) * d) all of the output from the test case. * * Thanks! * *-----------------------------------------------------------------------*/ #define HLINE "-------------------------------------------------------------\n" #ifndef MIN #define MIN(x,y) ((x)<(y)?(x):(y)) #endif #ifndef MAX #define MAX(x,y) ((x)>(y)?(x):(y)) #endif #ifndef STREAM_TYPE #define STREAM_TYPE double #endif #ifdef DYN static STREAM_TYPE *a, *b, *c; #else static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET], b[STREAM_ARRAY_SIZE+OFFSET], c[STREAM_ARRAY_SIZE+OFFSET]; #endif static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX}; static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; static double bytes[4] = { 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE }; extern int checktick(); extern double mysecond(); extern void checkSTREAMresults(); #ifdef TUNED extern void tuned_STREAM_Copy(); extern void tuned_STREAM_Scale(STREAM_TYPE scalar); extern void tuned_STREAM_Add(); extern void tuned_STREAM_Triad(STREAM_TYPE scalar); #endif #ifdef _OPENMP extern int omp_get_num_threads(); #endif int MAIN(int argc, char *argv[]) { int BytesPerWord; int k; ssize_t j; STREAM_TYPE scalar; double times[4][NTIMES]; /* --- SETUP --- determine precision and check timing --- */ printf(HLINE); printf("STREAM version $Revision: 5.10 $\n"); printf(HLINE); BytesPerWord = sizeof(STREAM_TYPE); printf("This system uses %d bytes per array element.\n", BytesPerWord); printf(HLINE); #ifdef N printf("***** WARNING: ******\n"); printf(" It appears that you set the preprocessor variable N when compiling this code.\n"); printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n"); printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE); printf("***** WARNING: ******\n"); #endif #ifdef DYN a = NALLOC(STREAM_TYPE, STREAM_ARRAY_SIZE+OFFSET); b = NALLOC(STREAM_TYPE, STREAM_ARRAY_SIZE+OFFSET); c = NALLOC(STREAM_TYPE, STREAM_ARRAY_SIZE+OFFSET); #endif printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, OFFSET); printf("Array addr %p %p %p\n", a, b, c); printf("Memory per array = %.1f MiB (= %.1f GiB).\n", BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0), BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0)); printf("Total memory required = %.1f MiB (= %.1f GiB).\n", (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.), (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.)); printf("Each kernel will be executed %d times.\n", NTIMES); printf(" The *best* time for each kernel (excluding the first iteration)\n"); printf(" will be used to compute the reported bandwidth.\n"); #ifdef _OPENMP printf(HLINE); #pragma omp parallel { #pragma omp master { k = omp_get_num_threads(); printf ("Number of Threads requested = %i\n",k); } } #endif #ifdef _OPENMP k = 0; #pragma omp parallel #pragma omp atomic k++; printf ("Number of Threads counted = %i\n",k); #endif /* Get initial value for system clock. 
*/ #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<STREAM_ARRAY_SIZE; j++) { a[j] = 1.0; b[j] = 2.0; c[j] = 0.0; } printf(HLINE); #ifdef TIME_CHECK { int quantum; double t; if ((quantum = checktick()) >= 1) printf("Your clock granularity/precision appears to be " "%d microseconds.\n", quantum); else { printf("Your clock granularity appears to be " "less than one microsecond.\n"); quantum = 1; } t = mysecond(); #ifdef _OPENMP #pragma omp parallel for #endif for (j = 0; j < STREAM_ARRAY_SIZE; j++) a[j] = 2.0E0 * a[j]; t = 1.0E6 * (mysecond() - t); printf("Each test below will take on the order" " of %d microseconds.\n", (int) t ); printf(" (= %d clock ticks)\n", (int) (t/quantum) ); printf("Increase the size of the arrays if this shows that\n"); printf("you are not getting at least 20 clock ticks per test.\n"); printf(HLINE); printf("WARNING -- The above is only a rough guideline.\n"); printf("For best results, please be sure you know the\n"); printf("precision of your system timer.\n"); printf(HLINE); } #endif /* TIME_CHECK */ /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ CLOCKS_EMULATE // CACHE_BARRIER(NULL) TRACE_START STATS_START scalar = 3.0; for (k=0; k<NTIMES; k++) { times[0][k] = mysecond(); #ifdef TUNED tuned_STREAM_Copy(); #else #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]; #endif times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); #ifdef TUNED tuned_STREAM_Scale(scalar); #else #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<STREAM_ARRAY_SIZE; j++) b[j] = scalar*c[j]; #endif times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef TUNED tuned_STREAM_Add(); #else #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]+b[j]; #endif times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef TUNED tuned_STREAM_Triad(scalar); #else #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<STREAM_ARRAY_SIZE; j++) a[j] = b[j]+scalar*c[j]; #endif times[3][k] = mysecond() - times[3][k]; } // CACHE_BARRIER(NULL) STATS_STOP TRACE_STOP CLOCKS_NORMAL /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } } printf("Function Best Rate MB/s Avg time Min time Max time\n"); for (j=0; j<4; j++) { avgtime[j] = avgtime[j]/(double)(NTIMES-1); printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j], 1.0E-06 * bytes[j]/mintime[j], avgtime[j], mintime[j], maxtime[j]); } printf(HLINE); /* --- Check Results --- */ checkSTREAMresults(); printf(HLINE); STATS_PRINT #ifdef DYN NFREE(a); NFREE(b); NFREE(c); #endif TRACE_CAP return 0; } #define M 20 int checktick() { int i, minDelta, Delta; double t1, t2, timesfound[M]; /* Collect a sequence of M unique time values from the system. */ for (i = 0; i < M; i++) { t1 = mysecond(); while( ((t2=mysecond()) - t1) < 1.0E-6 ) ; timesfound[i] = t1 = t2; } /* * Determine the minimum difference between these M values. * This result will be our estimate (in microseconds) for the * clock granularity. */ minDelta = 1000000; for (i = 1; i < M; i++) { Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1])); minDelta = MIN(minDelta, MAX(Delta,0)); } return(minDelta); } /* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems. 
*/ double mysecond() { #if 0 struct timeval tp; int i; i = gettimeofday(&tp,NULL); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); #else tick_t t; tget(t); return(tvesec(tval(t))); #endif } #ifndef abs #define abs(a) ((a) >= 0 ? (a) : -(a)) #endif void checkSTREAMresults() { STREAM_TYPE aj,bj,cj,scalar; STREAM_TYPE aSumErr,bSumErr,cSumErr; STREAM_TYPE aAvgErr,bAvgErr,cAvgErr; double epsilon; ssize_t j; int k,ierr,err; /* reproduce initialization */ aj = 1.0; bj = 2.0; cj = 0.0; #ifdef TIME_CHECK /* a[] is modified during timing check */ aj = 2.0E0 * aj; #endif /* now execute timing loop */ scalar = 3.0; for (k=0; k<NTIMES; k++) { cj = aj; bj = scalar*cj; cj = aj+bj; aj = bj+scalar*cj; } /* accumulate deltas between observed and expected results */ aSumErr = 0.0; bSumErr = 0.0; cSumErr = 0.0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { aSumErr += abs(a[j] - aj); bSumErr += abs(b[j] - bj); cSumErr += abs(c[j] - cj); // if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN } aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE; if (sizeof(STREAM_TYPE) == 4) { epsilon = 1.e-6; } else if (sizeof(STREAM_TYPE) == 8) { epsilon = 1.e-13; } else { printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",(unsigned long)sizeof(STREAM_TYPE)); epsilon = 1.e-6; } err = 0; if (abs(aAvgErr/aj) > epsilon) { err++; printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(a[j]/aj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,aj,a[j],abs((aj-a[j])/aAvgErr)); } #endif } } printf(" For array a[], %d errors were found.\n",ierr); } if (abs(bAvgErr/bj) > epsilon) { err++; printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj); printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(b[j]/bj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,bj,b[j],abs((bj-b[j])/bAvgErr)); } #endif } } printf(" For array b[], %d errors were found.\n",ierr); } if (abs(cAvgErr/cj) > epsilon) { err++; printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon); printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj); printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon); ierr = 0; for (j=0; j<STREAM_ARRAY_SIZE; j++) { if (abs(c[j]/cj-1.0) > epsilon) { ierr++; #ifdef VERBOSE if (ierr < 10) { printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,cj,c[j],abs((cj-c[j])/cAvgErr)); } #endif } } printf(" For array c[], %d errors were found.\n",ierr); } if (err == 0) { printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon); } #ifdef VERBOSE printf ("Results Validation Verbose Results: \n"); printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj); printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]); printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj)); #endif } #ifdef TUNED /* stubs for "tuned" versions of the kernels */ void 
tuned_STREAM_Copy() { ssize_t j; #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]; } void tuned_STREAM_Scale(STREAM_TYPE scalar) { ssize_t j; #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<STREAM_ARRAY_SIZE; j++) b[j] = scalar*c[j]; } void tuned_STREAM_Add() { ssize_t j; #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]+b[j]; } void tuned_STREAM_Triad(STREAM_TYPE scalar) { ssize_t j; #ifdef _OPENMP #pragma omp parallel for #endif for (j=0; j<STREAM_ARRAY_SIZE; j++) a[j] = b[j]+scalar*c[j]; } /* end of stubs for the "tuned" versions of the kernels */ #endif
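/*
 * Worked example of the bandwidth arithmetic reported in the summary above
 * (illustrative only; the array size and timing below are assumed, not
 * measured on any particular machine):
 *
 *   STREAM_ARRAY_SIZE = 10000000 and STREAM_TYPE = double (8 bytes/element).
 *   Copy touches two arrays, so bytes[0] = 2 * 8 * 10000000 = 160000000.
 *   If the best (minimum) Copy time over the NTIMES-1 timed iterations is
 *   0.016 s, the printed "Best Rate MB/s" is
 *       1.0E-06 * bytes[0] / mintime[0] = 1.0E-06 * 160000000 / 0.016
 *                                       = 10000.0 MB/s
 *   Add and Triad touch three arrays, hence
 *   bytes[2] = bytes[3] = 3 * 8 * STREAM_ARRAY_SIZE.
 */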
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % John Cristy % % October 1996 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/accelerate.h" #include "magick/blob.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/constitute.h" #include "magick/decorate.h" #include "magick/distort.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/effect.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/montage.h" #include "magick/morphology.h" #include "magick/morphology-private.h" #include "magick/paint.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/random-private.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resize.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/shear.h" #include "magick/signature-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/transform.h" #include "magick/threshold.h" #ifdef MAGICKCORE_CLPERFMARKER #include "CLPerfMarker.h" #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() adaptively blurs the image by blurring less % intensely near image edges and more intensely far from edges. We blur the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and AdaptiveBlurImage() selects a suitable radius for you. 
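%
%  For example, a call of the form (an illustrative sketch; the source image
%  and the ExceptionInfo are assumed to have been acquired by the caller):
%
%    Image *blurred = AdaptiveBlurImage(image,0.0,1.5,exception);
%
%  returns a new image that must eventually be released with DestroyImage().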
% % The format of the AdaptiveBlurImage method is: % % Image *AdaptiveBlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % Image *AdaptiveBlurImageChannel(const Image *image, % const ChannelType channel,double radius,const double sigma, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *blur_image; blur_image=AdaptiveBlurImageChannel(image,DefaultChannels,radius,sigma, exception); return(blur_image); } MagickExport Image *AdaptiveBlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { #define AdaptiveBlurImageTag "Convolve/Image" #define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma) CacheView *blur_view, *edge_view, *image_view; double **kernel, normalize; Image *blur_image, *edge_image, *gaussian_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; register ssize_t i; size_t width; ssize_t j, k, u, v, y; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (fabs(sigma) <= MagickEpsilon) return(blur_image); if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse) { InheritException(exception,&blur_image->exception); blur_image=DestroyImage(blur_image); return((Image *) NULL); } /* Edge detect the image brighness channel, level, blur, and level again. */ edge_image=EdgeImage(image,radius,exception); if (edge_image == (Image *) NULL) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } (void) LevelImage(edge_image,"20%,95%"); gaussian_image=BlurImage(edge_image,radius,sigma,exception); if (gaussian_image != (Image *) NULL) { edge_image=DestroyImage(edge_image); edge_image=gaussian_image; } (void) LevelImage(edge_image,"10%,95%"); /* Create a set of kernels from maximum (radius,sigma) to minimum. 
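    Only the even indices i = 0, 2, 4, ... are filled in; kernel[i] is a
    Gaussian of width (width-i) whose weights sum to one, so kernel[0] is
    the widest and later entries are progressively narrower.  Below, each
    pixel's edge intensity selects which kernel to apply: stronger edges
    give a larger i, hence a smaller kernel and less blurring near edges.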
*/ width=GetOptimalKernelWidth2D(radius,sigma); kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width, sizeof(*kernel))); if (kernel == (double **) NULL) { edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel)); for (i=0; i < (ssize_t) width; i+=2) { kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) (width-i),(width-i)*sizeof(**kernel))); if (kernel[i] == (double *) NULL) break; normalize=0.0; j=(ssize_t) (width-i)/2; k=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel[i][k]; k++; } } kernel[i][(k-1)/2]+=(1.0-normalize); if (sigma < MagickEpsilon) kernel[i][(k-1)/2]=1.0; } if (i < (ssize_t) width) { for (i-=2; i >= 0; i-=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Adaptively blur image. */ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); SetMagickPixelPacketBias(image,&bias); image_view=AcquireVirtualCacheView(image,exception); edge_view=AcquireVirtualCacheView(edge_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,blur_image,blur_image->rows,1) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p, *restrict r; register IndexPacket *restrict blur_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view); for (x=0; x < (ssize_t) blur_image->columns; x++) { double alpha, gamma; DoublePixelPacket pixel; register const double *restrict k; register ssize_t i, u, v; gamma=0.0; i=(ssize_t) ceil((double) width*QuantumScale* GetPixelIntensity(edge_image,r)-0.5); if (i < 0) i=0; else if (i > (ssize_t) width) i=(ssize_t) width; if ((i & 0x01) != 0) i--; p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y- (ssize_t) ((width-i)/2L),width-i,width-i,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); pixel.red=bias.red; pixel.green=bias.green; pixel.blue=bias.blue; pixel.opacity=bias.opacity; pixel.index=bias.index; k=kernel[i]; for (v=0; v < (ssize_t) (width-i); v++) { for (u=0; u < (ssize_t) (width-i); u++) { alpha=1.0; if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p)); if ((channel & RedChannel) != 0) pixel.red+=(*k)*alpha*GetPixelRed(p); if ((channel & GreenChannel) != 0) pixel.green+=(*k)*alpha*GetPixelGreen(p); if ((channel & BlueChannel) != 0) pixel.blue+=(*k)*alpha*GetPixelBlue(p); if ((channel & OpacityChannel) != 0) pixel.opacity+=(*k)*GetPixelOpacity(p); if (((channel & IndexChannel) != 0) && 
(image->colorspace == CMYKColorspace)) pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u); gamma+=(*k)*alpha; k++; p++; } } gamma=PerceptibleReciprocal(gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index)); q++; r++; } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AdaptiveBlurImageChannel) #endif proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); edge_view=DestroyCacheView(edge_view); image_view=DestroyCacheView(image_view); edge_image=DestroyImage(edge_image); for (i=0; i < (ssize_t) width; i+=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e S h a r p e n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveSharpenImage() adaptively sharpens the image by sharpening more % intensely near image edges and less intensely far from edges. We sharpen the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you. % % The format of the AdaptiveSharpenImage method is: % % Image *AdaptiveSharpenImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % Image *AdaptiveSharpenImageChannel(const Image *image, % const ChannelType channel,double radius,const double sigma, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *sharp_image; sharp_image=AdaptiveSharpenImageChannel(image,DefaultChannels,radius,sigma, exception); return(sharp_image); } MagickExport Image *AdaptiveSharpenImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { #define AdaptiveSharpenImageTag "Convolve/Image" #define MagickSigma (fabs(sigma) < MagickEpsilon ? 
MagickEpsilon : sigma) CacheView *sharp_view, *edge_view, *image_view; double **kernel, normalize; Image *sharp_image, *edge_image, *gaussian_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; register ssize_t i; size_t width; ssize_t j, k, u, v, y; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); sharp_image=CloneImage(image,0,0,MagickTrue,exception); if (sharp_image == (Image *) NULL) return((Image *) NULL); if (fabs(sigma) <= MagickEpsilon) return(sharp_image); if (SetImageStorageClass(sharp_image,DirectClass) == MagickFalse) { InheritException(exception,&sharp_image->exception); sharp_image=DestroyImage(sharp_image); return((Image *) NULL); } /* Edge detect the image brighness channel, level, sharp, and level again. */ edge_image=EdgeImage(image,radius,exception); if (edge_image == (Image *) NULL) { sharp_image=DestroyImage(sharp_image); return((Image *) NULL); } (void) LevelImage(edge_image,"20%,95%"); gaussian_image=BlurImage(edge_image,radius,sigma,exception); if (gaussian_image != (Image *) NULL) { edge_image=DestroyImage(edge_image); edge_image=gaussian_image; } (void) LevelImage(edge_image,"10%,95%"); /* Create a set of kernels from maximum (radius,sigma) to minimum. */ width=GetOptimalKernelWidth2D(radius,sigma); kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width, sizeof(*kernel))); if (kernel == (double **) NULL) { edge_image=DestroyImage(edge_image); sharp_image=DestroyImage(sharp_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel)); for (i=0; i < (ssize_t) width; i+=2) { kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) (width-i),(width-i)*sizeof(**kernel))); if (kernel[i] == (double *) NULL) break; normalize=0.0; j=(ssize_t) (width-i-1)/2; k=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel[i][k]; k++; } } kernel[i][(k-1)/2]=(double) ((-2.0)*normalize); if (sigma < MagickEpsilon) kernel[i][(k-1)/2]=1.0; } if (i < (ssize_t) width) { for (i-=2; i >= 0; i-=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); edge_image=DestroyImage(edge_image); sharp_image=DestroyImage(sharp_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Adaptively sharpen image. 
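    The per-pixel kernel index below is derived from (1.0 - edge intensity),
    the reverse of the adaptive blur above: strong edges yield a small i and
    therefore the widest sharpening kernel, so edges are sharpened most
    intensely while flat regions are left nearly untouched.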
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); SetMagickPixelPacketBias(image,&bias); image_view=AcquireVirtualCacheView(image,exception); edge_view=AcquireVirtualCacheView(edge_image,exception); sharp_view=AcquireAuthenticCacheView(sharp_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,sharp_image,sharp_image->rows,1) #endif for (y=0; y < (ssize_t) sharp_image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p, *restrict r; register IndexPacket *restrict sharp_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1, exception); if ((r == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } sharp_indexes=GetCacheViewAuthenticIndexQueue(sharp_view); for (x=0; x < (ssize_t) sharp_image->columns; x++) { double alpha, gamma; DoublePixelPacket pixel; register const double *restrict k; register ssize_t i, u, v; gamma=0.0; i=(ssize_t) ceil((double) width*(1.0-QuantumScale* GetPixelIntensity(edge_image,r))-0.5); if (i < 0) i=0; else if (i > (ssize_t) width) i=(ssize_t) width; if ((i & 0x01) != 0) i--; p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-i)/2L),y- (ssize_t) ((width-i)/2L),width-i,width-i,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); k=kernel[i]; pixel.red=bias.red; pixel.green=bias.green; pixel.blue=bias.blue; pixel.opacity=bias.opacity; pixel.index=bias.index; for (v=0; v < (ssize_t) (width-i); v++) { for (u=0; u < (ssize_t) (width-i); u++) { alpha=1.0; if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p)); if ((channel & RedChannel) != 0) pixel.red+=(*k)*alpha*GetPixelRed(p); if ((channel & GreenChannel) != 0) pixel.green+=(*k)*alpha*GetPixelGreen(p); if ((channel & BlueChannel) != 0) pixel.blue+=(*k)*alpha*GetPixelBlue(p); if ((channel & OpacityChannel) != 0) pixel.opacity+=(*k)*GetPixelOpacity(p); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+(width-i)*v+u); gamma+=(*k)*alpha; k++; p++; } } gamma=PerceptibleReciprocal(gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(sharp_indexes+x,ClampToQuantum(gamma*pixel.index)); q++; r++; } if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AdaptiveSharpenImageChannel) #endif proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sharp_image->type=image->type; sharp_view=DestroyCacheView(sharp_view); edge_view=DestroyCacheView(edge_view); 
image_view=DestroyCacheView(image_view); edge_image=DestroyImage(edge_image); for (i=0; i < (ssize_t) width; i+=2) kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]); kernel=(double **) RelinquishAlignedMemory(kernel); if (status == MagickFalse) sharp_image=DestroyImage(sharp_image); return(sharp_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlurImage() blurs an image. We convolve the image with a Gaussian operator % of the given radius and standard deviation (sigma). For reasonable results, % the radius should be larger than sigma. Use a radius of 0 and BlurImage() % selects a suitable radius for you. % % The format of the BlurImage method is: % % Image *BlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % Image *BlurImageChannel(const Image *image,const ChannelType channel, % const double radius,const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *blur_image; blur_image=BlurImageChannel(image,DefaultChannels,radius,sigma,exception); return(blur_image); } MagickExport Image *BlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { char geometry[MaxTextExtent]; KernelInfo *kernel_info; Image *blur_image = NULL; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); (void) FormatLocaleString(geometry,MaxTextExtent, "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma); kernel_info=AcquireKernelInfo(geometry); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); /* blur_image = AccelerateConvolveImageChannel(image,channel,kernel_info,exception); */ if (blur_image==NULL) { blur_image=MorphologyApply(image,channel,ConvolveMorphology,1,kernel_info, UndefinedCompositeOp,0.0,exception); } kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n v o l v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvolveImage() applies a custom convolution kernel to the image. % % The format of the ConvolveImage method is: % % Image *ConvolveImage(const Image *image,const size_t order, % const double *kernel,ExceptionInfo *exception) % Image *ConvolveImageChannel(const Image *image,const ChannelType channel, % const size_t order,const double *kernel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o order: the number of columns and rows in the filter kernel. % % o kernel: An array of double representing the convolution kernel. 
% % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConvolveImage(const Image *image,const size_t order, const double *kernel,ExceptionInfo *exception) { Image *convolve_image; #ifdef MAGICKCORE_CLPERFMARKER clBeginPerfMarkerAMD(__FUNCTION__,""); #endif convolve_image=ConvolveImageChannel(image,DefaultChannels,order,kernel, exception); #ifdef MAGICKCORE_CLPERFMARKER clEndPerfMarkerAMD(); #endif return(convolve_image); } MagickExport Image *ConvolveImageChannel(const Image *image, const ChannelType channel,const size_t order,const double *kernel, ExceptionInfo *exception) { Image *convolve_image; KernelInfo *kernel_info; register ssize_t i; kernel_info=AcquireKernelInfo((const char *) NULL); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); kernel_info->width=order; kernel_info->height=order; kernel_info->x=(ssize_t) (order-1)/2; kernel_info->y=(ssize_t) (order-1)/2; kernel_info->signature=MagickSignature; kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory( kernel_info->width,kernel_info->width*sizeof(*kernel_info->values))); if (kernel_info->values == (double *) NULL) { kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t) (order*order); i++) kernel_info->values[i]=kernel[i]; convolve_image=AccelerateConvolveImageChannel(image,channel,kernel_info, exception); if (convolve_image==NULL) { convolve_image=MorphologyApply(image,channel,ConvolveMorphology,1,kernel_info, UndefinedCompositeOp,0.0,exception); } kernel_info=DestroyKernelInfo(kernel_info); return(convolve_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s p e c k l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DespeckleImage() reduces the speckle noise in an image while perserving the % edges of the original image. A speckle removing filter uses a complementary % hulling technique (raising pixels that are darker than their surrounding % neighbors, then complementarily lowering pixels that are brighter than their % surrounding neighbors) to reduce the speckle index of that image (reference % Crimmins speckle removal). % % The format of the DespeckleImage method is: % % Image *DespeckleImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
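%
%  A minimal usage sketch (illustrative only; the source image and the
%  ExceptionInfo are assumed to have been acquired by the caller):
%
%    Image *despeckled = DespeckleImage(image,exception);
%
%  A non-NULL result must eventually be released with DestroyImage().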
% */ static void Hull(const Image *image,const ssize_t x_offset, const ssize_t y_offset,const size_t columns,const size_t rows, const int polarity,Quantum *restrict f,Quantum *restrict g) { register Quantum *p, *q, *r, *s; ssize_t y; assert(f != (Quantum *) NULL); assert(g != (Quantum *) NULL); p=f+(columns+2); q=g+(columns+2); r=p+(y_offset*(columns+2)+x_offset); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (y=0; y < (ssize_t) rows; y++) { register ssize_t i, x; SignedQuantum v; i=(2*y+1)+y*columns; if (polarity > 0) for (x=0; x < (ssize_t) columns; x++) { v=(SignedQuantum) p[i]; if ((SignedQuantum) r[i] >= (v+ScaleCharToQuantum(2))) v+=ScaleCharToQuantum(1); q[i]=(Quantum) v; i++; } else for (x=0; x < (ssize_t) columns; x++) { v=(SignedQuantum) p[i]; if ((SignedQuantum) r[i] <= (v-ScaleCharToQuantum(2))) v-=ScaleCharToQuantum(1); q[i]=(Quantum) v; i++; } } p=f+(columns+2); q=g+(columns+2); r=q+(y_offset*(columns+2)+x_offset); s=q-(y_offset*(columns+2)+x_offset); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) \ magick_threads(image,image,1,1) #endif for (y=0; y < (ssize_t) rows; y++) { register ssize_t i, x; SignedQuantum v; i=(2*y+1)+y*columns; if (polarity > 0) for (x=0; x < (ssize_t) columns; x++) { v=(SignedQuantum) q[i]; if (((SignedQuantum) s[i] >= (v+ScaleCharToQuantum(2))) && ((SignedQuantum) r[i] > v)) v+=ScaleCharToQuantum(1); p[i]=(Quantum) v; i++; } else for (x=0; x < (ssize_t) columns; x++) { v=(SignedQuantum) q[i]; if (((SignedQuantum) s[i] <= (v-ScaleCharToQuantum(2))) && ((SignedQuantum) r[i] < v)) v-=ScaleCharToQuantum(1); p[i]=(Quantum) v; i++; } } } MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception) { #define DespeckleImageTag "Despeckle/Image" CacheView *despeckle_view, *image_view; Image *despeckle_image; MagickBooleanType status; MemoryInfo *buffer_info, *pixel_info; register ssize_t i; Quantum *restrict buffer, *restrict pixels; size_t length, number_channels; static const ssize_t X[4] = {0, 1, 1,-1}, Y[4] = {1, 0, 1, 1}; /* Allocate despeckled image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); despeckle_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (despeckle_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(despeckle_image,DirectClass) == MagickFalse) { InheritException(exception,&despeckle_image->exception); despeckle_image=DestroyImage(despeckle_image); return((Image *) NULL); } /* Allocate image buffer. */ length=(size_t) ((image->columns+2)*(image->rows+2)); pixel_info=AcquireVirtualMemory(length,sizeof(*pixels)); buffer_info=AcquireVirtualMemory(length,sizeof(*buffer)); if ((pixel_info == (MemoryInfo *) NULL) || (buffer_info == (MemoryInfo *) NULL)) { if (buffer_info != (MemoryInfo *) NULL) buffer_info=RelinquishVirtualMemory(buffer_info); if (pixel_info != (MemoryInfo *) NULL) pixel_info=RelinquishVirtualMemory(pixel_info); despeckle_image=DestroyImage(despeckle_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info); buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info); /* Reduce speckle in the image. 
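    Each channel (red, green, blue, opacity, and black for CMYK images) is
    copied into a zero-padded working buffer, and Hull() above is applied
    along the four X[]/Y[] directions with positive and then negative
    polarity, following the Crimmins speckle-removal scheme referenced
    above.  The filtered channel is then written back to the output image.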
*/ status=MagickTrue; number_channels=(size_t) (image->colorspace == CMYKColorspace ? 5 : 4); image_view=AcquireVirtualCacheView(image,exception); despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception); for (i=0; i < (ssize_t) number_channels; i++) { register ssize_t k, x; ssize_t j, y; if (status == MagickFalse) continue; if ((image->matte == MagickFalse) && (i == 3)) continue; (void) ResetMagickMemory(pixels,0,length*sizeof(*pixels)); j=(ssize_t) image->columns+2; for (y=0; y < (ssize_t) image->rows; y++) { register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); j++; for (x=0; x < (ssize_t) image->columns; x++) { switch (i) { case 0: pixels[j]=GetPixelRed(p); break; case 1: pixels[j]=GetPixelGreen(p); break; case 2: pixels[j]=GetPixelBlue(p); break; case 3: pixels[j]=GetPixelOpacity(p); break; case 4: pixels[j]=GetPixelBlack(indexes+x); break; default: break; } p++; j++; } j++; } (void) ResetMagickMemory(buffer,0,length*sizeof(*buffer)); for (k=0; k < 4; k++) { Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer); Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer); Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer); Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer); } j=(ssize_t) image->columns+2; for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register IndexPacket *restrict indexes; register PixelPacket *restrict q; q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns, 1,exception); if (q == (PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(despeckle_view); j++; for (x=0; x < (ssize_t) image->columns; x++) { switch (i) { case 0: SetPixelRed(q,pixels[j]); break; case 1: SetPixelGreen(q,pixels[j]); break; case 2: SetPixelBlue(q,pixels[j]); break; case 3: SetPixelOpacity(q,pixels[j]); break; case 4: SetPixelIndex(indexes+x,pixels[j]); break; default: break; } q++; j++; } sync=SyncCacheViewAuthenticPixels(despeckle_view,exception); if (sync == MagickFalse) { status=MagickFalse; break; } j++; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i, number_channels); if (proceed == MagickFalse) status=MagickFalse; } } despeckle_view=DestroyCacheView(despeckle_view); image_view=DestroyCacheView(image_view); buffer_info=RelinquishVirtualMemory(buffer_info); pixel_info=RelinquishVirtualMemory(pixel_info); despeckle_image->type=image->type; if (status == MagickFalse) despeckle_image=DestroyImage(despeckle_image); return(despeckle_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E d g e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EdgeImage() finds edges in an image. Radius defines the radius of the % convolution filter. Use a radius of 0 and EdgeImage() selects a suitable % radius for you. % % The format of the EdgeImage method is: % % Image *EdgeImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o exception: return any errors or warnings in this structure. 
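%
%  For instance (illustrative only; image and exception are assumed to have
%  been acquired by the caller):
%
%    Image *edge = EdgeImage(image,0.0,exception);
%
%  A radius of 0 lets EdgeImage() choose the kernel width; a non-NULL result
%  must eventually be released with DestroyImage().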
% */ MagickExport Image *EdgeImage(const Image *image,const double radius, ExceptionInfo *exception) { Image *edge_image; KernelInfo *kernel_info; register ssize_t i; size_t width; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); width=GetOptimalKernelWidth1D(radius,0.5); kernel_info=AcquireKernelInfo((const char *) NULL); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); (void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info)); kernel_info->width=width; kernel_info->height=width; kernel_info->x=(ssize_t) (kernel_info->width-1)/2; kernel_info->y=(ssize_t) (kernel_info->height-1)/2; kernel_info->signature=MagickSignature; kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory( kernel_info->width,kernel_info->height*sizeof(*kernel_info->values))); if (kernel_info->values == (double *) NULL) { kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) kernel_info->values[i]=(-1.0); kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0; edge_image=AccelerateConvolveImageChannel(image,DefaultChannels,kernel_info, exception); if (edge_image==NULL) { edge_image=MorphologyApply(image,DefaultChannels,ConvolveMorphology,1, kernel_info,UndefinedCompositeOp,0.0,exception); } kernel_info=DestroyKernelInfo(kernel_info); return(edge_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E m b o s s I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EmbossImage() returns a grayscale image with a three-dimensional effect. % We convolve the image with a Gaussian operator of the given radius and % standard deviation (sigma). For reasonable results, radius should be % larger than sigma. Use a radius of 0 and Emboss() selects a suitable % radius for you. % % The format of the EmbossImage method is: % % Image *EmbossImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. 
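%
%  A typical call looks like this (illustrative only; image and exception
%  are assumed to have been acquired by the caller):
%
%    Image *emboss = EmbossImage(image,0.0,1.0,exception);
%
%  A non-NULL result must eventually be released with DestroyImage().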
% */ MagickExport Image *EmbossImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { double gamma, normalize; Image *emboss_image; KernelInfo *kernel_info; register ssize_t i; size_t width; ssize_t j, k, u, v; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); width=GetOptimalKernelWidth1D(radius,sigma); kernel_info=AcquireKernelInfo((const char *) NULL); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); kernel_info->width=width; kernel_info->height=width; kernel_info->x=(ssize_t) (width-1)/2; kernel_info->y=(ssize_t) (width-1)/2; kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory( kernel_info->width,kernel_info->width*sizeof(*kernel_info->values))); if (kernel_info->values == (double *) NULL) { kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } j=(ssize_t) (kernel_info->width-1)/2; k=j; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel_info->values[i]=(double) (((u < 0) || (v < 0) ? -8.0 : 8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/ (2.0*MagickPI*MagickSigma*MagickSigma)); if (u != k) kernel_info->values[i]=0.0; i++; } k--; } normalize=0.0; for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) normalize+=kernel_info->values[i]; gamma=PerceptibleReciprocal(normalize); for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) kernel_info->values[i]*=gamma; emboss_image=AccelerateConvolveImageChannel(image,DefaultChannels,kernel_info, exception); if (emboss_image == NULL) { emboss_image=MorphologyApply(image,DefaultChannels,ConvolveMorphology,1, kernel_info,UndefinedCompositeOp,0.0,exception); } kernel_info=DestroyKernelInfo(kernel_info); if (emboss_image != (Image *) NULL) (void) EqualizeImageChannel(emboss_image,(ChannelType) (AllChannels &~ SyncChannels)); return(emboss_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F i l t e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FilterImage() applies a custom convolution kernel to the image. % % The format of the FilterImage method is: % % Image *FilterImage(const Image *image,const KernelInfo *kernel, % ExceptionInfo *exception) % Image *FilterImageChannel(const Image *image,const ChannelType channel, % const KernelInfo *kernel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o kernel: the filtering kernel. % % o exception: return any errors or warnings in this structure. 
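%
%  A minimal usage sketch (illustrative only; image and exception are
%  assumed to have been acquired by the caller, NULL checks are elided, and
%  the kernel geometry string follows the convention used elsewhere in this
%  file):
%
%    KernelInfo *kernel_info = AcquireKernelInfo("gaussian:0x1.5");
%    Image *filtered = FilterImageChannel(image,DefaultChannels,kernel_info,
%      exception);
%    kernel_info = DestroyKernelInfo(kernel_info);
%
%  A non-NULL result must eventually be released with DestroyImage().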
% */ MagickExport Image *FilterImage(const Image *image,const KernelInfo *kernel, ExceptionInfo *exception) { Image *filter_image; filter_image=FilterImageChannel(image,DefaultChannels,kernel,exception); return(filter_image); } MagickExport Image *FilterImageChannel(const Image *image, const ChannelType channel,const KernelInfo *kernel,ExceptionInfo *exception) { #define FilterImageTag "Filter/Image" CacheView *filter_view, *image_view; Image *filter_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; MagickRealType *filter_kernel; register ssize_t i; ssize_t y; #ifdef MAGICKCORE_CLPERFMARKER clBeginPerfMarkerAMD(__FUNCTION__,""); #endif /* Initialize filter image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); if ((kernel->width % 2) == 0) ThrowImageException(OptionError,"KernelWidthMustBeAnOddNumber"); if (image->debug != MagickFalse) { char format[MaxTextExtent], *message; register const double *k; ssize_t u, v; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " FilterImage with %.20gx%.20g kernel:",(double) kernel->width,(double) kernel->height); message=AcquireString(""); k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { *message='\0'; (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < (ssize_t) kernel->width; u++) { (void) FormatLocaleString(format,MaxTextExtent,"%g ",*k++); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } filter_image=AccelerateConvolveImageChannel(image,channel,kernel,exception); if (filter_image != NULL) { #ifdef MAGICKCORE_CLPERFMARKER clEndPerfMarkerAMD(); #endif return(filter_image); } filter_image=CloneImage(image,0,0,MagickTrue,exception); if (filter_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(filter_image,DirectClass) == MagickFalse) { InheritException(exception,&filter_image->exception); filter_image=DestroyImage(filter_image); return((Image *) NULL); } /* Normalize kernel. */ filter_kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory( kernel->width,kernel->width*sizeof(*filter_kernel))); if (filter_kernel == (MagickRealType *) NULL) { filter_image=DestroyImage(filter_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t) (kernel->width*kernel->width); i++) filter_kernel[i]=(MagickRealType) kernel->values[i]; /* Filter image. 
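    Each output pixel is the weighted sum of a kernel->width by
    kernel->height neighborhood of input pixels, using the kernel values
    copied above.  When the image has an alpha channel and the opacity
    channel is selected, the color contributions are additionally weighted
    by alpha and the accumulated weight (gamma) renormalizes the result.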
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); SetMagickPixelPacketBias(image,&bias); image_view=AcquireVirtualCacheView(image,exception); filter_view=AcquireAuthenticCacheView(filter_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,filter_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict filter_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (kernel->width-1)/2L),y- (ssize_t) ((kernel->height-1)/2L),image->columns+kernel->width, kernel->height,exception); q=GetCacheViewAuthenticPixels(filter_view,0,y,filter_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); filter_indexes=GetCacheViewAuthenticIndexQueue(filter_view); for (x=0; x < (ssize_t) image->columns; x++) { DoublePixelPacket pixel; register const MagickRealType *restrict k; register const PixelPacket *restrict kernel_pixels; register ssize_t u; ssize_t v; pixel.red=bias.red; pixel.green=bias.green; pixel.blue=bias.blue; pixel.opacity=bias.opacity; pixel.index=bias.index; k=filter_kernel; kernel_pixels=p; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { pixel.red+=(*k)*kernel_pixels[u].red; pixel.green+=(*k)*kernel_pixels[u].green; pixel.blue+=(*k)*kernel_pixels[u].blue; k++; } kernel_pixels+=image->columns+kernel->width; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(pixel.blue)); if ((channel & OpacityChannel) != 0) { k=filter_kernel; kernel_pixels=p; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { pixel.opacity+=(*k)*kernel_pixels[u].opacity; k++; } kernel_pixels+=image->columns+kernel->width; } SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { register const IndexPacket *restrict kernel_indexes; k=filter_kernel; kernel_indexes=indexes; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { pixel.index+=(*k)*GetPixelIndex(kernel_indexes+u); k++; } kernel_indexes+=image->columns+kernel->width; } SetPixelIndex(filter_indexes+x,ClampToQuantum(pixel.index)); } } else { double alpha, gamma; gamma=0.0; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { alpha=(MagickRealType) (QuantumScale*(QuantumRange- GetPixelOpacity(kernel_pixels+u))); pixel.red+=(*k)*alpha*GetPixelRed(kernel_pixels+u); pixel.green+=(*k)*alpha*GetPixelGreen(kernel_pixels+u); pixel.blue+=(*k)*alpha*GetPixelBlue(kernel_pixels+u); gamma+=(*k)*alpha; k++; } kernel_pixels+=image->columns+kernel->width; } gamma=PerceptibleReciprocal(gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) 
SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); if ((channel & OpacityChannel) != 0) { k=filter_kernel; kernel_pixels=p; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { pixel.opacity+=(*k)*GetPixelOpacity(kernel_pixels+u); k++; } kernel_pixels+=image->columns+kernel->width; } SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { register const IndexPacket *restrict kernel_indexes; k=filter_kernel; kernel_pixels=p; kernel_indexes=indexes; for (v=0; v < (ssize_t) kernel->width; v++) { for (u=0; u < (ssize_t) kernel->height; u++) { alpha=(MagickRealType) (QuantumScale*(QuantumRange- kernel_pixels[u].opacity)); pixel.index+=(*k)*alpha*GetPixelIndex(kernel_indexes+u); k++; } kernel_pixels+=image->columns+kernel->width; kernel_indexes+=image->columns+kernel->width; } SetPixelIndex(filter_indexes+x,ClampToQuantum(gamma*pixel.index)); } } indexes++; p++; q++; } sync=SyncCacheViewAuthenticPixels(filter_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FilterImageChannel) #endif proceed=SetImageProgress(image,FilterImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } filter_image->type=image->type; filter_view=DestroyCacheView(filter_view); image_view=DestroyCacheView(image_view); filter_kernel=(MagickRealType *) RelinquishAlignedMemory(filter_kernel); if (status == MagickFalse) filter_image=DestroyImage(filter_image); #ifdef MAGICKCORE_CLPERFMARKER clEndPerfMarkerAMD(); #endif return(filter_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a u s s i a n B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GaussianBlurImage() blurs an image. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). % For reasonable results, the radius should be larger than sigma. Use a % radius of 0 and GaussianBlurImage() selects a suitable radius for you % % The format of the GaussianBlurImage method is: % % Image *GaussianBlurImage(const Image *image,onst double radius, % const double sigma,ExceptionInfo *exception) % Image *GaussianBlurImageChannel(const Image *image, % const ChannelType channel,const double radius,const double sigma, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. 
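%
%  For example (illustrative only; image and exception are assumed to have
%  been acquired by the caller):
%
%    Image *blurred = GaussianBlurImage(image,0.0,2.0,exception);
%
%  A radius of 0 lets the method pick one to match sigma; a non-NULL result
%  must eventually be released with DestroyImage().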
% */ MagickExport Image *GaussianBlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *blur_image; blur_image=GaussianBlurImageChannel(image,DefaultChannels,radius,sigma, exception); return(blur_image); } MagickExport Image *GaussianBlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { char geometry[MaxTextExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); (void) FormatLocaleString(geometry,MaxTextExtent,"gaussian:%.20gx%.20g", radius,sigma); kernel_info=AcquireKernelInfo(geometry); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=AccelerateConvolveImageChannel(image,channel,kernel_info, exception); if (blur_image==NULL) { blur_image=MorphologyApply(image,channel,ConvolveMorphology,1,kernel_info, UndefinedCompositeOp,0.0,exception); } kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o t i o n B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MotionBlurImage() simulates motion blur. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). % For reasonable results, radius should be larger than sigma. Use a % radius of 0 and MotionBlurImage() selects a suitable radius for you. % Angle gives the angle of the blurring motion. % % Andrew Protano contributed this effect. % % The format of the MotionBlurImage method is: % % Image *MotionBlurImage(const Image *image,const double radius, % const double sigma,const double angle,ExceptionInfo *exception) % Image *MotionBlurImageChannel(const Image *image,const ChannelType channel, % const double radius,const double sigma,const double angle, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o angle: Apply the effect along this angle. % % o exception: return any errors or warnings in this structure. % */ static double *GetMotionBlurKernel(const size_t width,const double sigma) { double *kernel, normalize; register ssize_t i; /* Generate a 1-D convolution kernel. 
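    The kernel is one-sided: sample i holds the Gaussian value
    exp(-(i*i)/(2*sigma^2)) / (sigma*sqrt(2*pi)), and the samples are then
    rescaled so that they sum to one.  Sample 0 therefore carries the
    largest weight and later samples, further along the blur direction,
    fade out.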
*/ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); kernel=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width, sizeof(*kernel))); if (kernel == (double *) NULL) return(kernel); normalize=0.0; for (i=0; i < (ssize_t) width; i++) { kernel[i]=(double) (exp((-((double) i*i)/(double) (2.0*MagickSigma* MagickSigma)))/(MagickSQ2PI*MagickSigma)); normalize+=kernel[i]; } for (i=0; i < (ssize_t) width; i++) kernel[i]/=normalize; return(kernel); } MagickExport Image *MotionBlurImage(const Image *image,const double radius, const double sigma,const double angle,ExceptionInfo *exception) { Image *motion_blur; motion_blur=MotionBlurImageChannel(image,DefaultChannels,radius,sigma,angle, exception); return(motion_blur); } MagickExport Image *MotionBlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, const double angle,ExceptionInfo *exception) { #define BlurImageTag "Blur/Image" CacheView *blur_view, *image_view; double *kernel; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; OffsetInfo *offset; PointInfo point; register ssize_t i; size_t width; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); width=GetOptimalKernelWidth1D(radius,sigma); kernel=GetMotionBlurKernel(width,sigma); if (kernel == (double *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset)); if (offset == (OffsetInfo *) NULL) { kernel=(double *) RelinquishAlignedMemory(kernel); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) { kernel=(double *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return((Image *) NULL); } if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse) { kernel=(double *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); InheritException(exception,&blur_image->exception); blur_image=DestroyImage(blur_image); return((Image *) NULL); } point.x=(double) width*sin(DegreesToRadians(angle)); point.y=(double) width*cos(DegreesToRadians(angle)); for (i=0; i < (ssize_t) width; i++) { offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5); offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5); } /* Motion blur image. 
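    For every output pixel, width samples are gathered from the source image
    along the direction given by angle, using the offsets precomputed above,
    and blended with the 1-D kernel weights.  Images with an alpha channel
    use alpha-weighted accumulation with the usual gamma renormalization.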
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); image_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict blur_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view); for (x=0; x < (ssize_t) image->columns; x++) { MagickPixelPacket qixel; PixelPacket pixel; register const IndexPacket *restrict indexes; register double *restrict k; register ssize_t i; k=kernel; qixel=bias; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (i=0; i < (ssize_t) width; i++) { (void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+ offset[i].y,&pixel,exception); qixel.red+=(*k)*pixel.red; qixel.green+=(*k)*pixel.green; qixel.blue+=(*k)*pixel.blue; qixel.opacity+=(*k)*pixel.opacity; if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewVirtualIndexQueue(image_view); qixel.index+=(*k)*(*indexes); } k++; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(qixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(qixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(qixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(qixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(blur_indexes+x,ClampToQuantum(qixel.index)); } else { double alpha, gamma; alpha=0.0; gamma=0.0; for (i=0; i < (ssize_t) width; i++) { (void) GetOneCacheViewVirtualPixel(image_view,x+offset[i].x,y+ offset[i].y,&pixel,exception); alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(&pixel)); qixel.red+=(*k)*alpha*pixel.red; qixel.green+=(*k)*alpha*pixel.green; qixel.blue+=(*k)*alpha*pixel.blue; qixel.opacity+=(*k)*pixel.opacity; if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewVirtualIndexQueue(image_view); qixel.index+=(*k)*alpha*GetPixelIndex(indexes); } gamma+=(*k)*alpha; k++; } gamma=PerceptibleReciprocal(gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*qixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*qixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(qixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index)); } q++; } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MotionBlurImageChannel) #endif proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); kernel=(double *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); if (status == MagickFalse) 
blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r e v i e w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PreviewImage() tiles 9 thumbnails of the specified image with an image % processing operation applied with varying parameters. This may be helpful % pin-pointing an appropriate parameter for a particular image processing % operation. % % The format of the PreviewImages method is: % % Image *PreviewImages(const Image *image,const PreviewType preview, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o preview: the image processing operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *PreviewImage(const Image *image,const PreviewType preview, ExceptionInfo *exception) { #define NumberTiles 9 #define PreviewImageTag "Preview/Image" #define DefaultPreviewGeometry "204x204+10+10" char factor[MaxTextExtent], label[MaxTextExtent]; double degrees, gamma, percentage, radius, sigma, threshold; Image *images, *montage_image, *preview_image, *thumbnail; ImageInfo *preview_info; MagickBooleanType proceed; MontageInfo *montage_info; QuantizeInfo quantize_info; RectangleInfo geometry; register ssize_t i, x; size_t colors; ssize_t y; /* Open output image file. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); colors=2; degrees=0.0; gamma=(-0.2f); preview_info=AcquireImageInfo(); SetGeometry(image,&geometry); (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y, &geometry.width,&geometry.height); images=NewImageList(); percentage=12.5; GetQuantizeInfo(&quantize_info); radius=0.0; sigma=1.0; threshold=0.0; x=0; y=0; for (i=0; i < NumberTiles; i++) { thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception); if (thumbnail == (Image *) NULL) break; (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL, (void *) NULL); (void) SetImageProperty(thumbnail,"label",DefaultTileLabel); if (i == (NumberTiles/2)) { (void) QueryColorDatabase("#dfdfdf",&thumbnail->matte_color,exception); AppendImageToList(&images,thumbnail); continue; } switch (preview) { case RotatePreview: { degrees+=45.0; preview_image=RotateImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"rotate %g",degrees); break; } case ShearPreview: { degrees+=5.0; preview_image=ShearImage(thumbnail,degrees,degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"shear %gx%g", degrees,2.0*degrees); break; } case RollPreview: { x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles; y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles; preview_image=RollImage(thumbnail,x,y,exception); (void) FormatLocaleString(label,MaxTextExtent,"roll %+.20gx%+.20g", (double) x,(double) y); break; } case HuePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MaxTextExtent,"100,100,%g", 2.0*percentage); (void) ModulateImage(preview_image,factor); (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor); break; } case SaturationPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) 
FormatLocaleString(factor,MaxTextExtent,"100,%g", 2.0*percentage); (void) ModulateImage(preview_image,factor); (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor); break; } case BrightnessPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MaxTextExtent,"%g",2.0*percentage); (void) ModulateImage(preview_image,factor); (void) FormatLocaleString(label,MaxTextExtent,"modulate %s",factor); break; } case GammaPreview: default: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; gamma+=0.4f; (void) GammaImageChannel(preview_image,DefaultChannels,gamma); (void) FormatLocaleString(label,MaxTextExtent,"gamma %g",gamma); break; } case SpiffPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image != (Image *) NULL) for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickTrue); (void) FormatLocaleString(label,MaxTextExtent,"contrast (%.20g)", (double) i+1); break; } case DullPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickFalse); (void) FormatLocaleString(label,MaxTextExtent,"+contrast (%.20g)", (double) i+1); break; } case GrayscalePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; quantize_info.colorspace=GRAYColorspace; (void) QuantizeImage(&quantize_info,preview_image); (void) FormatLocaleString(label,MaxTextExtent, "-colorspace gray -colors %.20g",(double) colors); break; } case QuantizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; (void) QuantizeImage(&quantize_info,preview_image); (void) FormatLocaleString(label,MaxTextExtent,"colors %.20g",(double) colors); break; } case DespecklePreview: { for (x=0; x < (i-1); x++) { preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; thumbnail=DestroyImage(thumbnail); thumbnail=preview_image; } preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(label,MaxTextExtent,"despeckle (%.20g)", (double) i+1); break; } case ReduceNoisePreview: { preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius, (size_t) radius,exception); (void) FormatLocaleString(label,MaxTextExtent,"noise %g",radius); break; } case AddNoisePreview: { switch ((int) i) { case 0: { (void) CopyMagickString(factor,"uniform",MaxTextExtent); break; } case 1: { (void) CopyMagickString(factor,"gaussian",MaxTextExtent); break; } case 2: { (void) CopyMagickString(factor,"multiplicative",MaxTextExtent); break; } case 3: { (void) CopyMagickString(factor,"impulse",MaxTextExtent); break; } case 4: { (void) CopyMagickString(factor,"laplacian",MaxTextExtent); break; } case 5: { (void) CopyMagickString(factor,"Poisson",MaxTextExtent); break; } default: { (void) CopyMagickString(thumbnail->magick,"NULL",MaxTextExtent); break; } } preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i, (size_t) i,exception); (void) FormatLocaleString(label,MaxTextExtent,"+noise %s",factor); break; } case SharpenPreview: { preview_image=SharpenImage(thumbnail,radius,sigma,exception); (void) 
FormatLocaleString(label,MaxTextExtent,"sharpen %gx%g", radius,sigma); break; } case BlurPreview: { preview_image=BlurImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MaxTextExtent,"blur %gx%g",radius, sigma); break; } case ThresholdPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) BilevelImage(thumbnail, (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0); (void) FormatLocaleString(label,MaxTextExtent,"threshold %g", (double) (percentage*((MagickRealType) QuantumRange+1.0))/100.0); break; } case EdgeDetectPreview: { preview_image=EdgeImage(thumbnail,radius,exception); (void) FormatLocaleString(label,MaxTextExtent,"edge %g",radius); break; } case SpreadPreview: { preview_image=SpreadImage(thumbnail,radius,exception); (void) FormatLocaleString(label,MaxTextExtent,"spread %g", radius+0.5); break; } case SolarizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) SolarizeImage(preview_image,(double) QuantumRange* percentage/100.0); (void) FormatLocaleString(label,MaxTextExtent,"solarize %g", (QuantumRange*percentage)/100.0); break; } case ShadePreview: { degrees+=10.0; preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees, exception); (void) FormatLocaleString(label,MaxTextExtent,"shade %gx%g", degrees,degrees); break; } case RaisePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; geometry.width=(size_t) (2*i+2); geometry.height=(size_t) (2*i+2); geometry.x=(i-1)/2; geometry.y=(i-1)/2; (void) RaiseImage(preview_image,&geometry,MagickTrue); (void) FormatLocaleString(label,MaxTextExtent, "raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double) geometry.height,(double) geometry.x,(double) geometry.y); break; } case SegmentPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; threshold+=0.4f; (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold, threshold); (void) FormatLocaleString(label,MaxTextExtent,"segment %gx%g", threshold,threshold); break; } case SwirlPreview: { preview_image=SwirlImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"swirl %g",degrees); degrees+=45.0; break; } case ImplodePreview: { degrees+=0.1f; preview_image=ImplodeImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"implode %g",degrees); break; } case WavePreview: { degrees+=5.0f; preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,exception); (void) FormatLocaleString(label,MaxTextExtent,"wave %gx%g", 0.5*degrees,2.0*degrees); break; } case OilPaintPreview: { preview_image=OilPaintImage(thumbnail,(double) radius,exception); (void) FormatLocaleString(label,MaxTextExtent,"paint %g",radius); break; } case CharcoalDrawingPreview: { preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MaxTextExtent,"charcoal %gx%g", radius,sigma); break; } case JPEGPreview: { char filename[MaxTextExtent]; int file; MagickBooleanType status; preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; preview_info->quality=(size_t) percentage; (void) FormatLocaleString(factor,MaxTextExtent,"%.20g",(double) preview_info->quality); file=AcquireUniqueFileResource(filename); if (file != -1) file=close(file)-1; (void) 
FormatLocaleString(preview_image->filename,MaxTextExtent, "jpeg:%s",filename); status=WriteImage(preview_info,preview_image); if (status != MagickFalse) { Image *quality_image; (void) CopyMagickString(preview_info->filename, preview_image->filename,MaxTextExtent); quality_image=ReadImage(preview_info,exception); if (quality_image != (Image *) NULL) { preview_image=DestroyImage(preview_image); preview_image=quality_image; } } (void) RelinquishUniqueFileResource(preview_image->filename); if ((GetBlobSize(preview_image)/1024) >= 1024) (void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%gmb ", factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/ 1024.0/1024.0); else if (GetBlobSize(preview_image) >= 1024) (void) FormatLocaleString(label,MaxTextExtent, "quality %s\n%gkb ",factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/1024.0); else (void) FormatLocaleString(label,MaxTextExtent,"quality %s\n%.20gb ", factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail))); break; } } thumbnail=DestroyImage(thumbnail); percentage+=12.5; radius+=0.5; sigma+=0.25; if (preview_image == (Image *) NULL) break; (void) DeleteImageProperty(preview_image,"label"); (void) SetImageProperty(preview_image,"label",label); AppendImageToList(&images,preview_image); proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i, NumberTiles); if (proceed == MagickFalse) break; } if (images == (Image *) NULL) { preview_info=DestroyImageInfo(preview_info); return((Image *) NULL); } /* Create the montage. */ montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL); (void) CopyMagickString(montage_info->filename,image->filename,MaxTextExtent); montage_info->shadow=MagickTrue; (void) CloneString(&montage_info->tile,"3x3"); (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry); (void) CloneString(&montage_info->frame,DefaultTileFrame); montage_image=MontageImages(images,montage_info,exception); montage_info=DestroyMontageInfo(montage_info); images=DestroyImageList(images); if (montage_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (montage_image->montage != (char *) NULL) { /* Free image directory. */ montage_image->montage=(char *) RelinquishMagickMemory( montage_image->montage); if (image->directory != (char *) NULL) montage_image->directory=(char *) RelinquishMagickMemory( montage_image->directory); } preview_info=DestroyImageInfo(preview_info); return(montage_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a d i a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RadialBlurImage() applies a radial blur to the image. % % Andrew Protano contributed this effect. % % The format of the RadialBlurImage method is: % % Image *RadialBlurImage(const Image *image,const double angle, % ExceptionInfo *exception) % Image *RadialBlurImageChannel(const Image *image,const ChannelType channel, % const double angle,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o angle: the angle of the radial blur. % % o exception: return any errors or warnings in this structure. 
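%
%  An illustrative call (not part of the original sources) that applies a 10
%  degree rotational blur about the image center to the default channels:
%
%    blur_image=RadialBlurImageChannel(image,DefaultChannels,10.0,exception);
%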
% */ MagickExport Image *RadialBlurImage(const Image *image,const double angle, ExceptionInfo *exception) { Image *blur_image; blur_image=RadialBlurImageChannel(image,DefaultChannels,angle,exception); return(blur_image); } MagickExport Image *RadialBlurImageChannel(const Image *image, const ChannelType channel,const double angle,ExceptionInfo *exception) { CacheView *blur_view, *image_view; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; MagickRealType blur_radius, *cos_theta, offset, *sin_theta, theta; PointInfo blur_center; register ssize_t i; size_t n; ssize_t y; /* Allocate blur image. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse) { InheritException(exception,&blur_image->exception); blur_image=DestroyImage(blur_image); return((Image *) NULL); } blur_center.x=(double) (image->columns-1)/2.0; blur_center.y=(double) (image->rows-1)/2.0; blur_radius=hypot(blur_center.x,blur_center.y); n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL); theta=DegreesToRadians(angle)/(MagickRealType) (n-1); cos_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n, sizeof(*cos_theta)); sin_theta=(MagickRealType *) AcquireQuantumMemory((size_t) n, sizeof(*sin_theta)); if ((cos_theta == (MagickRealType *) NULL) || (sin_theta == (MagickRealType *) NULL)) { blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } offset=theta*(MagickRealType) (n-1)/2.0; for (i=0; i < (ssize_t) n; i++) { cos_theta[i]=cos((double) (theta*i-offset)); sin_theta[i]=sin((double) (theta*i-offset)); } /* Radial blur image. 
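Each destination pixel averages source pixels sampled along a circular arc
about the image center, using the n angles precomputed in cos_theta and
sin_theta above.  Pixels close to the center use a larger angular step
(step = blur_radius/radius) because they sweep a shorter arc, and the sum is
renormalized by the number of samples taken (or by the accumulated alpha
weight when a matte channel is present).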
*/ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); image_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,blur_image,blur_image->rows,1) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { register const IndexPacket *restrict indexes; register IndexPacket *restrict blur_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view); for (x=0; x < (ssize_t) blur_image->columns; x++) { MagickPixelPacket qixel; MagickRealType normalize, radius; PixelPacket pixel; PointInfo center; register ssize_t i; size_t step; center.x=(double) x-blur_center.x; center.y=(double) y-blur_center.y; radius=hypot((double) center.x,center.y); if (radius == 0) step=1; else { step=(size_t) (blur_radius/radius); if (step == 0) step=1; else if (step >= n) step=n-1; } normalize=0.0; qixel=bias; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (i=0; i < (ssize_t) n; i+=(ssize_t) step) { (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t) (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5), (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y* cos_theta[i]+0.5),&pixel,exception); qixel.red+=pixel.red; qixel.green+=pixel.green; qixel.blue+=pixel.blue; qixel.opacity+=pixel.opacity; if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewVirtualIndexQueue(image_view); qixel.index+=(*indexes); } normalize+=1.0; } normalize=PerceptibleReciprocal(normalize); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(normalize*qixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(normalize*qixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(normalize*qixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(blur_indexes+x,ClampToQuantum(normalize*qixel.index)); } else { double alpha, gamma; alpha=1.0; gamma=0.0; for (i=0; i < (ssize_t) n; i+=(ssize_t) step) { (void) GetOneCacheViewVirtualPixel(image_view,(ssize_t) (blur_center.x+center.x*cos_theta[i]-center.y*sin_theta[i]+0.5), (ssize_t) (blur_center.y+center.x*sin_theta[i]+center.y* cos_theta[i]+0.5),&pixel,exception); alpha=(MagickRealType) (QuantumScale* GetPixelAlpha(&pixel)); qixel.red+=alpha*pixel.red; qixel.green+=alpha*pixel.green; qixel.blue+=alpha*pixel.blue; qixel.opacity+=pixel.opacity; if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewVirtualIndexQueue(image_view); qixel.index+=alpha*(*indexes); } gamma+=alpha; normalize+=1.0; } gamma=PerceptibleReciprocal(gamma); normalize=PerceptibleReciprocal(normalize); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*qixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*qixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*qixel.blue)); if ((channel & OpacityChannel) != 0) SetPixelOpacity(q,ClampToQuantum(normalize*qixel.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) 
SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*qixel.index)); } q++; } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RadialBlurImageChannel) #endif proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); cos_theta=(MagickRealType *) RelinquishMagickMemory(cos_theta); sin_theta=(MagickRealType *) RelinquishMagickMemory(sin_theta); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e l e c t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SelectiveBlurImage() selectively blur pixels within a contrast threshold. % It is similar to the unsharpen mask that sharpens everything with contrast % above a certain threshold. % % The format of the SelectiveBlurImage method is: % % Image *SelectiveBlurImage(const Image *image,const double radius, % const double sigma,const double threshold,ExceptionInfo *exception) % Image *SelectiveBlurImageChannel(const Image *image, % const ChannelType channel,const double radius,const double sigma, % const double threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o threshold: only pixels within this contrast threshold are included % in the blur operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SelectiveBlurImage(const Image *image,const double radius, const double sigma,const double threshold,ExceptionInfo *exception) { Image *blur_image; blur_image=SelectiveBlurImageChannel(image,DefaultChannels,radius,sigma, threshold,exception); return(blur_image); } MagickExport Image *SelectiveBlurImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, const double threshold,ExceptionInfo *exception) { #define SelectiveBlurImageTag "SelectiveBlur/Image" CacheView *blur_view, *image_view, *luminance_view; double *kernel; Image *blur_image, *luminance_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; register ssize_t i; size_t width; ssize_t center, j, u, v, y; /* Initialize blur image attributes. 
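A full width x width Gaussian kernel is built below, and a grayscale clone of
the input (luminance_image) is kept so the per-pixel contrast test can be
evaluated on pixel intensity rather than on the individual color channels.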
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); width=GetOptimalKernelWidth1D(radius,sigma); kernel=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width, width*sizeof(*kernel))); if (kernel == (double *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); j=(ssize_t) (width-1)/2; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) kernel[i++]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); } if (image->debug != MagickFalse) { char format[MaxTextExtent], *message; register const double *k; ssize_t u, v; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double) width); message=AcquireString(""); k=kernel; for (v=0; v < (ssize_t) width; v++) { *message='\0'; (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < (ssize_t) width; u++) { (void) FormatLocaleString(format,MaxTextExtent,"%+f ",*k++); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) { kernel=(double *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } if (SetImageStorageClass(blur_image,DirectClass) == MagickFalse) { kernel=(double *) RelinquishAlignedMemory(kernel); InheritException(exception,&blur_image->exception); blur_image=DestroyImage(blur_image); return((Image *) NULL); } luminance_image=CloneImage(image,0,0,MagickTrue,exception); if (luminance_image == (Image *) NULL) { kernel=(double *) RelinquishAlignedMemory(kernel); blur_image=DestroyImage(blur_image); return((Image *) NULL); } status=TransformImageColorspace(luminance_image,GRAYColorspace); if (status == MagickFalse) { InheritException(exception,&luminance_image->exception); kernel=(double *) RelinquishAlignedMemory(kernel); blur_image=DestroyImage(blur_image); luminance_image=DestroyImage(luminance_image); return((Image *) NULL); } /* Threshold blur image. 
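For each pixel, neighbors inside the kernel window contribute only when the
absolute difference between their luminance and the center pixel's intensity
is below the threshold; accepted neighbors are weighted by the Gaussian
kernel and the result is renormalized by the sum of accepted weights (gamma),
so smooth regions are blurred while edges above the contrast threshold stay
sharp.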
*/ status=MagickTrue; progress=0; center=(ssize_t) ((image->columns+width)*((width-1)/2L)+((width-1)/2L)); GetMagickPixelPacket(image,&bias); SetMagickPixelPacketBias(image,&bias); image_view=AcquireVirtualCacheView(image,exception); luminance_view=AcquireVirtualCacheView(luminance_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double gamma; MagickBooleanType sync; register const IndexPacket *restrict indexes; register const PixelPacket *restrict l, *restrict p; register IndexPacket *restrict blur_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t) ((width-1)/2L),image->columns+width,width,exception); l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y- (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception); q=GetCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (l == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); blur_indexes=GetCacheViewAuthenticIndexQueue(blur_view); for (x=0; x < (ssize_t) image->columns; x++) { double contrast; DoublePixelPacket pixel; MagickRealType intensity; register const double *restrict k; register ssize_t u; ssize_t j, v; pixel.red=bias.red; pixel.green=bias.green; pixel.blue=bias.blue; pixel.opacity=bias.opacity; pixel.index=bias.index; k=kernel; intensity=GetPixelIntensity(image,p+center); gamma=0.0; j=0; if (((channel & OpacityChannel) == 0) || (image->matte == MagickFalse)) { for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) { pixel.red+=(*k)*GetPixelRed(p+u+j); pixel.green+=(*k)*GetPixelGreen(p+u+j); pixel.blue+=(*k)*GetPixelBlue(p+u+j); gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } if (gamma != 0.0) { gamma=PerceptibleReciprocal(gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); } if ((channel & OpacityChannel) != 0) { gamma=0.0; j=0; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) { pixel.opacity+=(*k)*(p+u+j)->opacity; gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } gamma=PerceptibleReciprocal(gamma); SetPixelOpacity(q,ClampToQuantum(gamma*pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { gamma=0.0; j=0; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) { pixel.index+=(*k)*GetPixelIndex(indexes+x+u+j); gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } gamma=PerceptibleReciprocal(gamma); SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index)); } } else { MagickRealType alpha; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { 
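/*
  Accumulate this neighbor only when its luminance is within the contrast
  threshold of the center pixel; in this matte branch the color
  contributions are also weighted by the neighbor's alpha.
*/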
contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) { alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p+u+j)); pixel.red+=(*k)*alpha*GetPixelRed(p+u+j); pixel.green+=(*k)*alpha*GetPixelGreen(p+u+j); pixel.blue+=(*k)*alpha*GetPixelBlue(p+u+j); pixel.opacity+=(*k)*GetPixelOpacity(p+u+j); gamma+=(*k)*alpha; } k++; } j+=(ssize_t) (image->columns+width); } if (gamma != 0.0) { gamma=PerceptibleReciprocal(gamma); if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(gamma*pixel.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(gamma*pixel.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(gamma*pixel.blue)); } if ((channel & OpacityChannel) != 0) { gamma=0.0; j=0; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) { pixel.opacity+=(*k)*GetPixelOpacity(p+u+j); gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } gamma=PerceptibleReciprocal(gamma); SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { gamma=0.0; j=0; for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,l+u+j)-intensity; if (fabs(contrast) < threshold) { alpha=(MagickRealType) (QuantumScale* GetPixelAlpha(p+u+j)); pixel.index+=(*k)*alpha*GetPixelIndex(indexes+x+u+j); gamma+=(*k); } k++; } j+=(ssize_t) (image->columns+width); } gamma=PerceptibleReciprocal(gamma); SetPixelIndex(blur_indexes+x,ClampToQuantum(gamma*pixel.index)); } } p++; l++; q++; } sync=SyncCacheViewAuthenticPixels(blur_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SelectiveBlurImageChannel) #endif proceed=SetImageProgress(image,SelectiveBlurImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); luminance_view=DestroyCacheView(luminance_view); image_view=DestroyCacheView(image_view); luminance_image=DestroyImage(luminance_image); kernel=(double *) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShadeImage() shines a distant light on an image to create a % three-dimensional effect. You control the positioning of the light with % azimuth and elevation; azimuth is measured in degrees off the x axis % and elevation is measured in pixels above the Z axis. % % The format of the ShadeImage method is: % % Image *ShadeImage(const Image *image,const MagickBooleanType gray, % const double azimuth,const double elevation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o gray: A value other than zero shades the intensity of each pixel. % % o azimuth, elevation: Define the light source direction. % % o exception: return any errors or warnings in this structure. 
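%
%  An illustrative call (not part of the original sources) with the light 30
%  degrees off the x axis and 30 degrees of elevation, shading intensity:
%
%    shade_image=ShadeImage(image,MagickTrue,30.0,30.0,exception);
%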
% */ MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray, const double azimuth,const double elevation,ExceptionInfo *exception) { #define ShadeImageTag "Shade/Image" CacheView *image_view, *shade_view; Image *linear_image, *shade_image; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo light; ssize_t y; /* Initialize shaded image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); linear_image=CloneImage(image,0,0,MagickTrue,exception); shade_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL)) { if (linear_image != (Image *) NULL) linear_image=DestroyImage(linear_image); if (shade_image != (Image *) NULL) shade_image=DestroyImage(shade_image); return((Image *) NULL); } if (SetImageStorageClass(shade_image,DirectClass) == MagickFalse) { InheritException(exception,&shade_image->exception); linear_image=DestroyImage(linear_image); shade_image=DestroyImage(shade_image); return((Image *) NULL); } /* Compute the light vector. */ light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))* cos(DegreesToRadians(elevation)); light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))* cos(DegreesToRadians(elevation)); light.z=(double) QuantumRange*sin(DegreesToRadians(elevation)); /* Shade image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(linear_image,exception); shade_view=AcquireAuthenticCacheView(shade_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(linear_image,shade_image,linear_image->rows,1) #endif for (y=0; y < (ssize_t) linear_image->rows; y++) { MagickRealType distance, normal_distance, shade; PrimaryInfo normal; register const PixelPacket *restrict p, *restrict s0, *restrict s1, *restrict s2; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3, exception); q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1, exception); if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } /* Shade this row of pixels. */ normal.z=2.0*(double) QuantumRange; /* constant Z of surface normal */ s0=p+1; s1=s0+image->columns+2; s2=s1+image->columns+2; for (x=0; x < (ssize_t) linear_image->columns; x++) { /* Determine the surface normal and compute shading. 
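normal.x and normal.y are horizontal and vertical intensity gradients
estimated from the 3x3 neighborhood fetched above (rows s0, s1 and s2), and
normal.z is a constant 2*QuantumRange.  The shade value is a Lambertian-style
diffuse term: the dot product of this normal with the light vector divided by
the length of the normal, with light.z used directly for flat regions.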
*/ normal.x=(double) (GetPixelIntensity(linear_image,s0-1)+ GetPixelIntensity(linear_image,s1-1)+ GetPixelIntensity(linear_image,s2-1)- GetPixelIntensity(linear_image,s0+1)- GetPixelIntensity(linear_image,s1+1)- GetPixelIntensity(linear_image,s2+1)); normal.y=(double) (GetPixelIntensity(linear_image,s2-1)+ GetPixelIntensity(linear_image,s2)+ GetPixelIntensity(linear_image,s2+1)- GetPixelIntensity(linear_image,s0-1)- GetPixelIntensity(linear_image,s0)- GetPixelIntensity(linear_image,s0+1)); if ((normal.x == 0.0) && (normal.y == 0.0)) shade=light.z; else { shade=0.0; distance=normal.x*light.x+normal.y*light.y+normal.z*light.z; if (distance > MagickEpsilon) { normal_distance=normal.x*normal.x+normal.y*normal.y+normal.z* normal.z; if (normal_distance > (MagickEpsilon*MagickEpsilon)) shade=distance/sqrt((double) normal_distance); } } if (gray != MagickFalse) { SetPixelRed(q,shade); SetPixelGreen(q,shade); SetPixelBlue(q,shade); } else { SetPixelRed(q,ClampToQuantum(QuantumScale*shade*GetPixelRed(s1))); SetPixelGreen(q,ClampToQuantum(QuantumScale*shade*GetPixelGreen(s1))); SetPixelBlue(q,ClampToQuantum(QuantumScale*shade*GetPixelBlue(s1))); } q->opacity=s1->opacity; s0++; s1++; s2++; q++; } if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ShadeImage) #endif proceed=SetImageProgress(image,ShadeImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } shade_view=DestroyCacheView(shade_view); image_view=DestroyCacheView(image_view); linear_image=DestroyImage(linear_image); if (status == MagickFalse) shade_image=DestroyImage(shade_image); return(shade_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a r p e n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SharpenImage() sharpens the image. We convolve the image with a Gaussian % operator of the given radius and standard deviation (sigma). For % reasonable results, radius should be larger than sigma. Use a radius of 0 % and SharpenImage() selects a suitable radius for you. % % Using a separable kernel would be faster, but the negative weights cancel % out on the corners of the kernel producing often undesirable ringing in the % filtered result; this can be avoided by using a 2D gaussian shaped image % sharpening kernel instead. % % The format of the SharpenImage method is: % % Image *SharpenImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % Image *SharpenImageChannel(const Image *image,const ChannelType channel, % const double radius,const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. 
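%
%  An illustrative call (not part of the original sources); a radius of 0.0
%  lets the method pick a suitable kernel width for a sigma of 1.0:
%
%    sharp_image=SharpenImageChannel(image,DefaultChannels,0.0,1.0,exception);
%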
% */ MagickExport Image *SharpenImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *sharp_image; sharp_image=SharpenImageChannel(image,DefaultChannels,radius,sigma,exception); return(sharp_image); } MagickExport Image *SharpenImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, ExceptionInfo *exception) { double gamma, normalize; Image *sharp_image; KernelInfo *kernel_info; register ssize_t i; size_t width; ssize_t j, u, v; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); width=GetOptimalKernelWidth2D(radius,sigma); kernel_info=AcquireKernelInfo((const char *) NULL); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); (void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info)); kernel_info->width=width; kernel_info->height=width; kernel_info->x=(ssize_t) (width-1)/2; kernel_info->y=(ssize_t) (width-1)/2; kernel_info->signature=MagickSignature; kernel_info->values=(double *) MagickAssumeAligned(AcquireAlignedMemory( kernel_info->width,kernel_info->height*sizeof(*kernel_info->values))); if (kernel_info->values == (double *) NULL) { kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } normalize=0.0; j=(ssize_t) (kernel_info->width-1)/2; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel_info->values[i]=(double) (-exp(-((double) u*u+v*v)/(2.0* MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel_info->values[i]; i++; } } kernel_info->values[i/2]=(double) ((-2.0)*normalize); normalize=0.0; for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) normalize+=kernel_info->values[i]; gamma=PerceptibleReciprocal(normalize); for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) kernel_info->values[i]*=gamma; sharp_image=MorphologyApply(image,channel,ConvolveMorphology,1,kernel_info, UndefinedCompositeOp,0.0,exception); kernel_info=DestroyKernelInfo(kernel_info); return(sharp_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S p r e a d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SpreadImage() is a special effects method that randomly displaces each % pixel in a block defined by the radius parameter. % % The format of the SpreadImage method is: % % Image *SpreadImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: Choose a random pixel in a neighborhood of this extent. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SpreadImage(const Image *image,const double radius, ExceptionInfo *exception) { #define SpreadImageTag "Spread/Image" CacheView *image_view, *spread_view; Image *spread_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; RandomInfo **restrict random_info; size_t width; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Initialize spread image attributes. 
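Each destination pixel is interpolated from a source position jittered by up
to about half of width in each direction, where width is derived from the
radius by GetOptimalKernelWidth1D below; a separate RandomInfo per thread
lets the row loop run safely in parallel.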
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); spread_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (spread_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(spread_image,DirectClass) == MagickFalse) { InheritException(exception,&spread_image->exception); spread_image=DestroyImage(spread_image); return((Image *) NULL); } /* Spread image. */ status=MagickTrue; progress=0; GetMagickPixelPacket(spread_image,&bias); width=GetOptimalKernelWidth1D(radius,0.5); random_info=AcquireRandomInfoThreadSet(); image_view=AcquireVirtualCacheView(image,exception); spread_view=AcquireAuthenticCacheView(spread_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,spread_image,spread_image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) spread_image->rows; y++) { const int id = GetOpenMPThreadId(); MagickPixelPacket pixel; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(spread_view); pixel=bias; for (x=0; x < (ssize_t) spread_image->columns; x++) { (void) InterpolateMagickPixelPacket(image,image_view, UndefinedInterpolatePixel,(double) x+width*(GetPseudoRandomValue( random_info[id])-0.5),(double) y+width*(GetPseudoRandomValue( random_info[id])-0.5),&pixel,exception); SetPixelPacket(spread_image,&pixel,q,indexes+x); q++; } if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SpreadImage) #endif proceed=SetImageProgress(image,SpreadImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } spread_view=DestroyCacheView(spread_view); image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) spread_image=DestroyImage(spread_image); return(spread_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n s h a r p M a s k I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnsharpMaskImage() sharpens one or more image channels. We convolve the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and UnsharpMaskImage() selects a suitable radius for you. % % The format of the UnsharpMaskImage method is: % % Image *UnsharpMaskImage(const Image *image,const double radius, % const double sigma,const double amount,const double threshold, % ExceptionInfo *exception) % Image *UnsharpMaskImageChannel(const Image *image, % const ChannelType channel,const double radius,const double sigma, % const double gain,const double threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o channel: the channel type. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o gain: the percentage of the difference between the original and the % blur image that is added back into the original. % % o threshold: the threshold in pixels needed to apply the diffence gain. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *UnsharpMaskImage(const Image *image,const double radius, const double sigma,const double gain,const double threshold, ExceptionInfo *exception) { Image *sharp_image; sharp_image=UnsharpMaskImageChannel(image,DefaultChannels,radius,sigma,gain, threshold,exception); return(sharp_image); } MagickExport Image *UnsharpMaskImageChannel(const Image *image, const ChannelType channel,const double radius,const double sigma, const double gain,const double threshold,ExceptionInfo *exception) { #define SharpenImageTag "Sharpen/Image" CacheView *image_view, *unsharp_view; Image *unsharp_image; MagickBooleanType status; MagickOffsetType progress; MagickPixelPacket bias; MagickRealType quantum_threshold; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); unsharp_image=BlurImageChannel(image,channel &~ SyncChannels,radius,sigma, exception); if (unsharp_image == (Image *) NULL) return((Image *) NULL); quantum_threshold=(MagickRealType) QuantumRange*threshold; /* Unsharp-mask image. */ status=MagickTrue; progress=0; GetMagickPixelPacket(image,&bias); image_view=AcquireVirtualCacheView(image,exception); unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,unsharp_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { DoublePixelPacket pixel; register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register IndexPacket *restrict unsharp_indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); unsharp_indexes=GetCacheViewAuthenticIndexQueue(unsharp_view); pixel.red=bias.red; pixel.green=bias.green; pixel.blue=bias.blue; pixel.opacity=bias.opacity; pixel.index=bias.index; for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) { pixel.red=GetPixelRed(p)-(MagickRealType) GetPixelRed(q); if (fabs(2.0*pixel.red) < quantum_threshold) pixel.red=(MagickRealType) GetPixelRed(p); else pixel.red=(MagickRealType) GetPixelRed(p)+(pixel.red*gain); SetPixelRed(q,ClampToQuantum(pixel.red)); } if ((channel & GreenChannel) != 0) { pixel.green=GetPixelGreen(p)-(MagickRealType) q->green; if (fabs(2.0*pixel.green) < quantum_threshold) pixel.green=(MagickRealType) GetPixelGreen(p); else pixel.green=(MagickRealType) GetPixelGreen(p)+(pixel.green*gain); SetPixelGreen(q,ClampToQuantum(pixel.green)); } if ((channel & BlueChannel) != 0) { pixel.blue=GetPixelBlue(p)-(MagickRealType) q->blue; if (fabs(2.0*pixel.blue) < 
quantum_threshold) pixel.blue=(MagickRealType) GetPixelBlue(p); else pixel.blue=(MagickRealType) GetPixelBlue(p)+(pixel.blue*gain); SetPixelBlue(q,ClampToQuantum(pixel.blue)); } if ((channel & OpacityChannel) != 0) { pixel.opacity=GetPixelOpacity(p)-(MagickRealType) q->opacity; if (fabs(2.0*pixel.opacity) < quantum_threshold) pixel.opacity=(MagickRealType) GetPixelOpacity(p); else pixel.opacity=GetPixelOpacity(p)+(pixel.opacity*gain); SetPixelOpacity(q,ClampToQuantum(pixel.opacity)); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { pixel.index=GetPixelIndex(indexes+x)-(MagickRealType) GetPixelIndex(unsharp_indexes+x); if (fabs(2.0*pixel.index) < quantum_threshold) pixel.index=(MagickRealType) GetPixelIndex(indexes+x); else pixel.index=(MagickRealType) GetPixelIndex(indexes+x)+ (pixel.index*gain); SetPixelIndex(unsharp_indexes+x,ClampToQuantum(pixel.index)); } p++; q++; } if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_UnsharpMaskImageChannel) #endif proceed=SetImageProgress(image,SharpenImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } unsharp_image->type=image->type; unsharp_view=DestroyCacheView(unsharp_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) unsharp_image=DestroyImage(unsharp_image); return(unsharp_image); }
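/*
  Illustrative usage sketch, not part of the original MagickCore sources: it
  shows how the unsharp mask entry point documented above might be driven from
  a small standalone program, assuming the MagickCore headers already included
  by this file.  The filenames, the parameter values, and the
  UNSHARP_USAGE_EXAMPLE guard are assumptions made for the example; the guard
  keeps the sketch out of normal builds.
*/
#ifdef UNSHARP_USAGE_EXAMPLE
int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *image,
    *sharp_image;

  ImageInfo
    *image_info;

  (void) argc;
  MagickCoreGenesis(argv[0],MagickTrue);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"input.png",MaxTextExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      /*
        A radius of 0.0 lets the method choose the kernel width for a sigma of
        1.0; a gain of 1.0 adds back the full difference and a threshold of
        0.05 ignores small differences.
      */
      sharp_image=UnsharpMaskImageChannel(image,DefaultChannels,0.0,1.0,1.0,
        0.05,exception);
      if (sharp_image != (Image *) NULL)
        {
          (void) CopyMagickString(sharp_image->filename,"output.png",
            MaxTextExtent);
          (void) WriteImage(image_info,sharp_image);
          sharp_image=DestroyImage(sharp_image);
        }
      image=DestroyImage(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
#endif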
error-correct.c
/** * Coral: short reads error correction with multiple alignments * Copyright (C) 2011 Leena Salmela <[email protected]> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <omp.h> #include "reverse.h" #include "multi-align.h" #include "q-gram-index-ht.h" void usage(char *prog_name) { printf("Usage: %s -f[q,s] <input file> -o <output file> [options]\n", prog_name); printf("Required parameters:\n"); printf("-f or -fq or -fs <file> Use -f for a fasta file, -fq for standard fastq file\n"); printf(" and -fs for Solexa fasta file\n"); printf("-o <file> Output file for corrected reads. Format is fasta if\n"); printf(" the input file is fasta and fastq if the input file\n"); printf(" is fastq. The output file cannot be the same file as\n"); printf(" the input file.\n\n"); /* printf("-n <int> The number of reads in the input file.\n\n");*/ printf("Quick options for sequencing technologies:\n"); printf("-454 Equal to: -mr %d -mm %d -g %d\n", MATCH_REWARD_454, MM_PENALTY_454, GAP_PENALTY_454); printf("-illumina Equal to: -mr %d -mm %d -g %d\n\n", MATCH_REWARD_ILLUMINA, MM_PENALTY_ILLUMINA, GAP_PENALTY_ILLUMINA); printf("Other options:\n"); printf("-k <int> The length of a k-mer for indexing the reads. [%d]\n", DEFAULT_Q); /* printf("-m <int> Maximum number of different k-mers indexed (for memory\n"); */ /* printf(" allocation) [%d]\n", MAX_NUM_QGRAMS); */ printf("-e <float> Maximum allowed error rate of a read in the multiple\n"); printf(" alignment. [%.2f]\n", MAX_ERROR_RATE); printf("-t <float> The minimum proportion of agreeing reads to consider\n"); printf(" the consensus of a multiple alignment trustworthy.\n"); printf(" [%.2f]\n", THRESHOLD); printf("-q <float> A threshold for proportion of quality values for bases\n"); printf(" agreeing with the consensus to consider the consensus\n"); printf(" trustworthy. Not used if input is fasta. [%.2f]\n", QUALITY_THRESHOLD); printf("-cq [<int>] If the integer parameter is not present, old quality\n"); printf(" scores are retained for corrected mismatches. If the\n"); printf(" integer parameter is present, the quality scores of\n"); printf(" all corrected bases are set to that value. By default\n"); printf(" quality scores of corrected bases are computed as\n"); printf(" explained in the paper.\n"); printf("-a <int> Maximum number of reads to compute multiple alignments\n"); printf(" for. [%d]\n", MAX_ALIGNED_READS); printf("-p <int> Number of threads to use. [%d]\n", NUM_THREADS); printf("-r <int> The number of times the k-mer index is built during a\n"); printf(" correction run.\n"); printf("-i <int> Index only the first <int> reads. By default all\n"); printf(" reads are indexed.\n"); printf("-j <int> Do not correct the first <int> reads. By default all\n"); printf(" reads are corrected.\n"); printf("-s <file> Write statistics of computed alignments to a file. 
By\n"); printf(" default statistics are not written.\n"); printf("-c <file> Write the consensus sequences of computed alignments\n"); printf(" to a file. By default the consensus sequences are not\n"); printf(" written.\n"); printf("-mr <int> Reward for matching bases when computing alignments.\n"); printf(" [%d]\n", MATCH_REWARD); printf("-mm <int> Penalty for mismatches when computing alignments. [%d]\n", MM_PENALTY); printf("-g <int> Gap penalty when computing alignments. If gap penalty\n"); printf(" is higher than 100 times mismatch penalty, it is\n"); printf(" assumed that no gaps will occur. [%d]\n", GAP_PENALTY); } #define BUF_SIZE 65536 #define MAX_DUPL_READS 10000 void correct_errors(uchar **reads, int *read_length, char **qual, int64_t num_reads, int q, int max_grams, int p, uchar *num_edits, double max_error_rate, double threshold, double quality_threshold, int max_aligned_reads, int match_reward, int mm_penalty, int gap_penalty, char *stat_file, char *consensus_file, int corrected_quality_scheme, int corrected_quality, int rebuilds, int64_t indexed_reads, int64_t min_correct_id) { char *line, *line2, *lineq, *lineq2; int64_t i, j, k; int64_t ii, jj; FILE *f; FILE *sf; int tid; /* Thread id of the current thread */ int dist; /* Edit distance between the original and corrected read */ read_pos *align_pos; /* Arguments to alignment calculation */ char *pos; char *consensus; /* Consensus sequence returned by alignment calculation */ int clen; /* Length of consensus */ align *alignment; /* Data structures for alignment calculation */ double quality; /* Quality of an alignment */ int share; /* Number of reads that share a k-mer with the base read */ int64_t num_good = 0; /* Number of alignments that were used for correction */ int64_t num_bad = 0; /* Number of alignments that were not good enough for correction */ int64_t num_perfect = 0; /* Number of alignments with no disagreeing columns */ char *used_reads; /* For recording which reads have been used to build a consensus */ int bits; uint64_t mask; /* Bit mask for masking the succinct representations */ uint64_t gram; /* Succinct representation of a q-gram */ uint64_t gram_reverse; /* Succinct representation of the reverse of a q-gram */ uint64_t current; int lastN; index_t *index; int iter; char *aligned_reads; char *corrected_reads; int part; for(i = 0; i < num_reads; i++) { num_edits[i] = 0; } aligned_reads = (char *)malloc(num_reads*sizeof(char)); corrected_reads = (char *)malloc(num_reads*sizeof(char)); if (aligned_reads == NULL || corrected_reads == NULL) { printf("Malloc failed\n"); exit(1); } for(i = 0; i < num_reads; i++) { aligned_reads[i] = 0; corrected_reads[i] = 0; } if (consensus_file != NULL) { f = fopen(consensus_file, "w"); if (!f) { printf("Could not open consensus file %s\n", consensus_file); exit(1); } used_reads = (char *)malloc(num_reads*sizeof(char)); if (used_reads == NULL) { printf("Malloc failed\n"); exit(1); } for(i = 0; i < num_reads; i++) used_reads[i] = 0; } if (stat_file != NULL) { sf = fopen(stat_file, "w"); if (!sf) { printf("Could not open statistics fille %s\n", stat_file); exit(1); } } omp_set_num_threads(p); for(iter = 0; iter < rebuilds; iter++) { if (iter > 0) free_index(index); i = build_index(reads, indexed_reads, q, (void **)&index, (uchar *)PREFIX, max_grams, 1/*(int)(1.0/(1.0-threshold))*/, max_aligned_reads); if (i != 0) { printf("Building q-gram index failed: %s\n", error_index(i)); exit(1); } printf("Q-gram index built.\n"); printf("Correcting reads\n"); #pragma omp parallel 
private(line, line2, lineq, lineq2, i, j, k, align_pos, pos, consensus, clen, ii, jj, dist, alignment, tid, quality, gram, gram_reverse, lastN, current, bits, mask, share, part) { bits = 2; mask = ((uint64_t)1 << (2*index->q))-1; #pragma omp critical { /* Is malloc thread safe? */ align_pos = (read_pos *)malloc(MAX_DUPL_READS*sizeof(read_pos)); alignment = (align *)malloc(sizeof(align)); line = (char *)malloc(BUF_SIZE*sizeof(char)); line2 = (char *)malloc(BUF_SIZE*sizeof(char)); if (qual != NULL) { lineq = (char *)malloc(BUF_SIZE*sizeof(char)); lineq2 = (char *)malloc(BUF_SIZE*sizeof(char)); } else { lineq = NULL; lineq2 = NULL; } for(i = 0; i < MAX_DUPL_READS; i++) { align_pos[i].edited_read = (char *)malloc(MAX_READ_LENGTH*sizeof(char)); if (qual != NULL) { align_pos[i].edited_qual = (char *)malloc(MAX_READ_LENGTH*sizeof(char)); } } for(i = 0; i < MAX_CONTIG_LEN+10; i++) { alignment->dp[i][0] = 0; alignment->dp_trace[i][0] = 'J'; } for(j = 0; j < MAX_CONTIG_LEN+10; j++) { alignment->dp[0][j] = 0; alignment->dp_trace[0][j] = 'I'; } } tid = omp_get_thread_num(); p = omp_get_num_threads(); printf("Thread %d/%d starting\n", tid, p); /* How to split the work between the threads: * - Threads should not work at the same time with ids close to * each other because if so the likelihood of two threads * working on the same read will be higher. */ for(i = iter*(int64_t)num_reads/rebuilds+tid*10000; i < (iter+1)*(int64_t)num_reads/rebuilds; i++) { if (i >= min_correct_id && num_edits[i] <= max_error_rate*strlen((char *)reads[i])) { gram = 0; gram_reverse = 0; lastN = -1; /* Initialize so that we will index the first q-gram only after reading q chars */ for (part = 0; part < (signed int)strlen((char *)reads[i]); part +=50) { jj = 0; for(j = part; j < part+50 && j < (signed int)strlen((char *)reads[i]); j++) { if (index->reads[i][j] == 'N' || index->reads[i][j] == 'n') { lastN = j; } switch(index->reads[i][j]) { case 'A': case 'a': gram = gram << bits | CODE_A; gram_reverse = gram_reverse >> bits | ((uint64_t)CODE_T << ((index->q-1)*bits)); break; case 'C': case 'c': gram = gram << bits | CODE_C; gram_reverse = gram_reverse >> bits | ((uint64_t)CODE_G << ((index->q-1)*bits)); break; case 'G': case 'g': gram = gram << bits | CODE_G; gram_reverse = gram_reverse >> bits | ((uint64_t)CODE_C << ((index->q-1)*bits)); break; case 'T': case 't': gram = gram << bits | CODE_T; gram_reverse = gram_reverse >> bits | ((uint64_t)CODE_A << ((index->q-1)*bits)); break; default: gram = gram << bits; gram_reverse = gram_reverse >> bits; break; } gram = gram & mask; gram_reverse = gram_reverse & mask; if (lastN > j-index->q || gram_reverse == gram) continue; if (gram_reverse < gram) { current = gram_reverse; } else { current = gram; } /* decipher the q-gram (forward and reverse representations, line and line2) */ for(k = 0; k < index->q; k++) { switch((gram >> 2*(index->q-k-1)) & 0x03) { case 0: line[k] = 'A'; line2[index->q-k-1] = 'T'; break; case 1: line[k] = 'C'; line2[index->q-k-1] = 'G'; break; case 2: line[k] = 'G'; line2[index->q-k-1] = 'C'; break; case 3: line[k] = 'T'; line2[index->q-k-1] = 'A'; break; } } line[index->q] = '\0'; line2[index->q] = '\0'; #pragma omp critical { k = (*index->gram2id)[current]-1; } if (k < 0) continue; if (i >= indexed_reads && jj < MAX_DUPL_READS) { /* This read is not in the index so we add it here. 
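Reads with an id at or beyond indexed_reads were never inserted into the
q-gram index, so the base read is appended to the alignment set by locating
the current q-gram in its sequence with strstr; align_pos[jj].pos records
where the read starts relative to the base-read coordinate of this q-gram
occurrence and occ counts how many times the q-gram repeats within the read.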
*/ align_pos[jj].read = i; /* q-gram is always in forward orientation (however, because of using parts of the read as base read, the read might already be corrected and then we cannot find the q-gram anymore) */ pos = strstr((char *)reads[i], line); if (pos != NULL) { align_pos[jj].ori = 'U'; align_pos[jj].pos = (j-index->q+1)- ((long long int)pos -(long long int)reads[i]); align_pos[jj].occ = 1; pos = strstr(pos+1, line); while(pos != NULL) { align_pos[jj].occ++; pos = strstr(pos+1, line); } jj++; } } /* initiliaze input for alignment calculation */ for(ii = 0; ii < (signed int)index->counts[k] && jj < MAX_DUPL_READS; ii++) { if (num_edits[index->gram_reads[k][ii]] > max_error_rate*strlen((char *)reads[index->gram_reads[k][ii]])) continue; align_pos[jj].read = index->gram_reads[k][ii]; /* Search for the q-gram in forward orientation */ pos = strstr((char *)reads[index->gram_reads[k][ii]], line); if (pos != NULL) { align_pos[jj].ori = 'U'; align_pos[jj].pos = (j-index->q+1)- ((long long int)pos -(long long int)reads[index->gram_reads[k][ii]]); align_pos[jj].occ = 1; while(ii + 1 < (signed int)index->counts[k] && index->gram_reads[k][ii+1] == index->gram_reads[k][ii]) { ii++; align_pos[jj].occ++; } jj++; } else { /* Forward q-gram not found, search for the reverse complement */ pos = strstr((char *)reads[index->gram_reads[k][ii]], line2); if (pos != NULL) { align_pos[jj].ori = 'C'; align_pos[jj].pos = (j-index->q+1) - (strlen((const char *)reads[index->gram_reads[k][ii]]) - ((int64_t)pos - (int64_t)reads[index->gram_reads[k][ii]]) - index->q); align_pos[jj].occ = 1; while(ii + 1 < (signed int)index->counts[k] && index->gram_reads[k][ii+1] == index->gram_reads[k][ii]) { ii++; align_pos[jj].occ++; } jj++; } } } } j = jj; if (j >= 1.0/(1.0-threshold) && j < MAX_DUPL_READS) { #pragma omp critical { for(ii = 0; ii < j; ii++) { aligned_reads[align_pos[ii].read] = 1; } } /* Compute multiple alignment */ j = multi_align(alignment, (char **)reads, qual, align_pos, j, max_error_rate, max_aligned_reads, match_reward, mm_penalty, gap_penalty); for(k = 0; k < j; k++) if (align_pos[k].read == (ulong) i) break; clen = get_consensus(alignment, &consensus); consensus[clen] = '\0'; if (k >= j) { quality = 0.0; share = 0; } else { share = kalign_share(alignment, align_pos, j, index->q, k, (char **)reads); } if (share != j && share > 0) { /* Recompute multiple alignment */ j = multi_align(alignment, (char **)reads, qual, align_pos, share, max_error_rate, max_aligned_reads, match_reward, mm_penalty, gap_penalty); for(k = 0; k < j; k++) if (align_pos[k].read == (ulong) i) break; clen = get_consensus(alignment, &consensus); consensus[clen] = '\0'; if (k >= j) { quality = 0.0; share = 0; } else { share = kalign_share(alignment, align_pos, j, index->q, k, (char **)reads); } } if (share == j) { quality = align_quality(alignment, align_pos, j); } else { quality = 0.0; } #pragma omp critical { if (stat_file != NULL) { snprintf(line2, BUF_SIZE, "%ld\t%f\t%d\t%ld\n", i, 1.0-quality, share, j); if (!fwrite(line2, sizeof(char), strlen(line2), sf)) { printf("Could not write to statistics file\n"); } } /* update statistics */ if (quality >= 1.0) { num_perfect++; } else if (quality > 1.0-max_error_rate) { num_good++; } else { num_bad++; } } #ifdef DEBUG #pragma omp critical { /* if (quality > 1.0-max_error_rate && quality < 1.0 && share == j) { */ if (quality < 1.0) { printf("******************\n"); printf("%d: %s\n", i, reads[i]); printf(" %s\n", consensus); for(k = 0; k < j; k++) { printf("%c ", align_pos[k].ori); for(ii = 
0; ii < align_pos[k].pos; ii++) { printf(" "); } if (align_pos[k].ori == 'C') { reverse(align_pos[k].edited_read, line2); printf("%s\n", line2); } else { printf("%s\n", align_pos[k].edited_read); } } if (qual != NULL) { for(k = 0; k < j; k++) { printf("%c ", align_pos[k].ori); for(ii = 0; ii < align_pos[k].pos; ii++) { printf(" "); } if (align_pos[k].ori == 'C') { for(ii = strlen(align_pos[k].edited_read)-1; ii >= 0; ii--) { printf("%d ", align_pos[k].edited_qual[ii]); } printf("\n"); } else { for(ii = 0; ii < (signed)strlen(align_pos[k].edited_read); ii++) { printf("%d ", align_pos[k].edited_qual[ii]); } printf("\n"); } } } } printf("OK: %d, Quality: %f Share: %d/%d\n", alignment->ok, quality, share, j); } #endif if (quality > 1.0-max_error_rate && share == j) { #pragma omp critical { for(ii = 0; ii < j; ii++) { corrected_reads[align_pos[ii].read] = 1; } } } if (quality > 1.0-max_error_rate && quality < 1.0 && share == j) { /* Alignment has an appropriate quality for correction */ clen = get_consensus(alignment, &consensus); /* Quality values for the consensus */ int cons_qual[2*MAX_READ_LENGTH+1]; int tot_qual[2*MAX_READ_LENGTH+1]; if (qual != NULL) { for(k = 0; k < clen; k++) { cons_qual[k] = 0; tot_qual[k] = 0; } for(k = 0; k < j; k++) { /* Copy to line2 a representation of the edited read in the same orientation as the consensus */ if (align_pos[k].ori == 'U') { strcpy(line2, align_pos[k].edited_read); if (qual != NULL) memcpy(lineq2, align_pos[k].edited_qual, strlen(line2)); } else { reverse(align_pos[k].edited_read, line2); if (qual != NULL) { for(jj = 0; jj < (signed)strlen(line2); jj++) lineq2[strlen(line2)-jj-1] = align_pos[k].edited_qual[jj]; lineq2[strlen(line2)] = '\0'; } } for(ii = 0; ii < (signed int)strlen(line2); ii++) { if (consensus[align_pos[k].pos + ii] == line2[ii]) { cons_qual[align_pos[k].pos + ii] += lineq2[ii]; } tot_qual[align_pos[k].pos + ii] += lineq2[ii]; } } } for(k = 0; k < j; k++) { if ((signed)align_pos[k].read <= min_correct_id) { continue; } if (align_pos[k].edits > 0) { /* Copy to line2 a representation of the edited read in the same orientation as the consensus */ if (align_pos[k].ori == 'U') { strcpy(line2, align_pos[k].edited_read); if (qual != NULL) memcpy(lineq2, align_pos[k].edited_qual, strlen(line2)); } else { reverse(align_pos[k].edited_read, line2); if (qual != NULL) { for(jj = 0; jj < (signed)strlen(line2); jj++) lineq2[strlen(line2)-jj-1] = align_pos[k].edited_qual[jj]; lineq2[strlen(line2)] = '\0'; } } #ifdef DEBUG printf("Original read %s\n", reads[align_pos[k].read]); #endif jj = 0; dist = 0; for(ii = 0; ii < (signed int)strlen(line2); ii++) { int consC; /* Number of reads supporting consensus at this position */ //int editC; /* Number of reads supporting this read at this position */ switch(consensus[align_pos[k].pos+ii]) { case 'A': case 'a': consC = alignment->contigA[align_pos[k].pos+ii]; break; case 'C': case 'c': consC = alignment->contigC[align_pos[k].pos+ii]; break; case 'G': case 'g': consC = alignment->contigG[align_pos[k].pos+ii]; break; case 'T': case 't': consC = alignment->contigT[align_pos[k].pos+ii]; break; case 'N': case 'n': case '-': consC = alignment->contigN[align_pos[k].pos+ii]; break; default: consC = 0; break; } /* switch(line2[ii]) { */ /* case 'A': */ /* case 'a': */ /* editC = alignment->contigA[align_pos[k].pos+ii]; */ /* break; */ /* case 'C': */ /* case 'c': */ /* editC = alignment->contigC[align_pos[k].pos+ii]; */ /* break; */ /* case 'G': */ /* case 'g': */ /* editC = 
alignment->contigG[align_pos[k].pos+ii]; */ /* break; */ /* case 'T': */ /* case 't': */ /* editC = alignment->contigT[align_pos[k].pos+ii]; */ /* break; */ /* case 'N': */ /* case 'n': */ /* case '-': */ /* editC = alignment->contigN[align_pos[k].pos+ii]; */ /* break; */ /* default: */ /* editC = 0; */ /* break; */ /* } */ if ((double)consC / (double)(alignment->contigA[align_pos[k].pos+ii]+ alignment->contigC[align_pos[k].pos+ii]+ alignment->contigG[align_pos[k].pos+ii]+ alignment->contigT[align_pos[k].pos+ii]+ alignment->contigN[align_pos[k].pos+ii]) >= threshold && (qual == NULL? 1 : (double)cons_qual[align_pos[k].pos+ii]/(double)tot_qual[align_pos[k].pos+ii] >= quality_threshold) /*cons_qual[align_pos[k].pos+ii] - lineq2[ii] > quality_threshold)*/) { if (line2[ii] != consensus[align_pos[k].pos+ii]) dist++; if (consensus[align_pos[k].pos+ii] != '-') { line[jj] = consensus[align_pos[k].pos+ii]; if (qual != NULL) { if (line2[ii] == consensus[align_pos[k].pos+ii]) { lineq[jj] = lineq2[ii]; } else { if (corrected_quality_scheme == FLAT) { lineq[jj] = corrected_quality; } else if (corrected_quality_scheme == ESTIMATE || line2[ii] == '-') { lineq[jj] = cons_qual[align_pos[k].pos+ii]/consC; } else { lineq[jj] = lineq2[ii]; } } } jj++; } } else { if (line2[ii] != '-') { line[jj] = line2[ii]; if (qual != NULL) lineq[jj] = lineq2[ii]; jj++; } } } line[jj] = '\0'; if (qual != NULL) lineq[jj] = '\0'; // Check that the corrected read fits into the array if ((int)strlen(line) <= read_length[align_pos[k].read] + MAX_INSERTIONS) { /* Copy the original read in the original orientation to the reads array */ #pragma omp critical { if (align_pos[k].ori == 'U') { strcpy((char *)reads[align_pos[k].read], line); if (qual != NULL) memcpy(qual[align_pos[k].read], lineq, strlen(line)); } else { reverse(line, (char *)reads[align_pos[k].read]); if (qual != NULL) { for(jj = 0; jj < (signed)strlen(line); jj++) { qual[align_pos[k].read][strlen(line)-jj-1] = lineq[jj]; } qual[align_pos[k].read][strlen(line)] = '\0'; } } #ifdef DEBUG printf("Corrected read %s\n", reads[align_pos[k].read]); #endif num_edits[align_pos[k].read] += dist; } } } } } else { j = share; } if (quality > 1.0-max_error_rate) { #pragma omp critical { /* Write consensus sequence to file */ if (consensus_file != NULL) { for(ii = 0, jj = 0; jj < (int64_t)strlen(consensus); jj++) { if (consensus[jj] != '-') { consensus[ii] = consensus[jj]; ii++; } } consensus[ii] = '\0'; snprintf(line, BUF_SIZE, ">consensus_%ld (%f)\n%s\n", i, quality, consensus); if (!fwrite(line, sizeof(char), strlen(line), f)) { printf("Writing to consensus file failed\n"); } for(k = 0; k < j; k++) { used_reads[align_pos[k].read] = 1; } } } } } } } /* Print some hint of progress */ if (i % 10000 == 9999) { printf("Thread %d: %ld/%ld Perfect alignments: %ld, Good alignments: %ld, Bad alignments: %ld\n", tid, i, num_reads, num_perfect, num_good, num_bad); /* printf("Thread %d: %d/%d\n", tid, i, num_reads); */ i += (p-1)*10000; } } #pragma omp critical { for(i = 0; i < MAX_DUPL_READS; i++) { free(align_pos[i].edited_read); if (qual != NULL) free(align_pos[i].edited_qual); } free(align_pos); free(alignment); free(line); free(line2); if (qual != NULL) { free(lineq); free(lineq2); } printf("Thread %d/%d finishing\n", tid, p); } } // end omp parallel } printf("Perfect alignments: %ld, Good alignments: %ld, Bad alignments: %ld\n", num_perfect, num_good, num_bad); int num_aligned = 0; int num_corrected = 0; for(i = 0; i < num_reads; i++) { if (aligned_reads[i] > 0) num_aligned++; if 
(corrected_reads[i] > 0) num_corrected++; } printf("%d reads were aligned, %d reads were aligned in a good alignment\n", num_aligned, num_corrected); if (stat_file != NULL) fclose(sf); line = (char *)malloc(BUF_SIZE*sizeof(char)); if (consensus_file != NULL) { for(i = 0; i < num_reads; i++) { if (!used_reads[i]) { snprintf(line, BUF_SIZE, ">unused_read_%ld\n%s\n", i, reads[i]); if (!fwrite(line, sizeof(char), strlen(line), f)) { printf("Could not write to consensus file\n"); } } } fclose(f); } } /** * Main routine of error correction. * Command line options: * -f[q] <input file> * -o <output file> * -n <number of reads> * -k <k-mer length> * -m <maximum number of different k-mers> * -e <max error rate for alignment> * -t <minimum proportion of agreeing reads to consider the consensus trustworthy> * -q <threshold for the sum of quality values of bases agreeing with consensus minus * the quality of the base to correct> * -a <maximum number of reads to compute multiple alignments for> * -p <number of threads to use> * -pr <prefix for considered q-grams> (flag not implemented) * -s <statistics file> * -c <consensus file> * -r <number of rebuilds> */ int main(int argc, char *argv[]) { char *fasta_file = NULL; char *fastq_file = NULL; int64_t num_reads = 0; char *out_file = NULL; char *stat_file = NULL; char *consensus_file = NULL; uchar **reads; char **qual; int *read_length; char *line = (char *)malloc(BUF_SIZE*sizeof(char)); char *line2 = (char *)malloc(BUF_SIZE*sizeof(char)); int64_t i,k,j,jj; int64_t ii; FILE *f; FILE *of; uchar *buf4reads = NULL; char *buf4qual = NULL; int c; int p = NUM_THREADS; /* Number of threads to use */ /* Number of edit operations performed on each read */ uchar *num_edits; int q = DEFAULT_Q; /* The length of a q-gram */ char *prog_name = argv[0]; uint64_t max_grams = MAX_NUM_QGRAMS; double max_error_rate = MAX_ERROR_RATE; double threshold = THRESHOLD; double quality_threshold = QUALITY_THRESHOLD; int max_aligned_reads = MAX_ALIGNED_READS; int match_reward = MATCH_REWARD; int mm_penalty = MM_PENALTY; int gap_penalty = GAP_PENALTY; int corrected_quality_scheme = ESTIMATE; int corrected_quality = 0; int quality_offset = 64; int rebuilds = 1; int64_t indexed_reads = -1; int64_t min_correct_id = -1; while(argc > 0) { if (!strcmp(argv[0], "-f")) { if (argc > 1) { fasta_file = argv[1]; fastq_file = NULL; } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-fq")) { if (argc > 1) { fastq_file = argv[1]; fasta_file = NULL; quality_offset = 33; } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-fs")) { if (argc > 1) { fastq_file = argv[1]; fasta_file = NULL; quality_offset = 64; } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-o")) { if (argc > 1) { out_file = argv[1]; } else { usage(prog_name); return 1; } argc--; argv++; /* } else if (!strcmp(argv[0], "-n")) { */ /* if (argc > 1) { */ /* num_reads = atoi(argv[1]); */ /* } else { */ /* usage(prog_name); */ /* return 1; */ /* } */ /* argc--; */ /* argv++; */ } else if (!strcmp(argv[0], "-k")) { if (argc > 1) { q = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-p")) { if (argc > 1) { p = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; /* } else if (!strcmp(argv[0], "-m")) { */ /* if (argc > 1) { */ /* max_grams = atol(argv[1]); */ /* } else { */ /* usage(prog_name); */ /* return 1; */ /* } */ /* argc--; */ /* argv++; */ } else if (!strcmp(argv[0], 
"-e")) { if (argc > 1) { max_error_rate = atof(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-t")) { if (argc > 1) { threshold = atof(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-q")) { if (argc > 1) { quality_threshold = atof(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-cq")) { if (argc > 1 && argv[1][0] != '-') { corrected_quality_scheme = FLAT; corrected_quality = atoi(argv[1]); argc--; argv++; } else { corrected_quality_scheme = KEEP; } } else if (!strcmp(argv[0], "-a")) { if (argc > 1) { max_aligned_reads = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-c")) { if (argc > 1) { consensus_file = argv[1]; } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-s")) { if (argc > 1) { stat_file = argv[1]; } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-mr")) { if (argc > 1) { match_reward = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-mm")) { if (argc > 1) { mm_penalty = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-g")) { if (argc > 1) { gap_penalty = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-r")) { if (argc > 1) { rebuilds = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-i")) { if (argc > 1) { indexed_reads = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-j")) { if (argc > 1) { min_correct_id = atoi(argv[1]); } else { usage(prog_name); return 1; } argc--; argv++; } else if (!strcmp(argv[0], "-454")) { match_reward = MATCH_REWARD_454; mm_penalty = MM_PENALTY_454; gap_penalty = GAP_PENALTY_454; } else if (!strcmp(argv[0], "-illumina")) { match_reward = MATCH_REWARD_ILLUMINA; mm_penalty = MM_PENALTY_ILLUMINA; gap_penalty = GAP_PENALTY_ILLUMINA; } argc--; argv++; } if (out_file == NULL/* || num_reads == 0*/) { usage(prog_name); return 1; } if ((fasta_file != NULL && strcmp(out_file, fasta_file) == 0)|| (fastq_file != NULL && strcmp(out_file, fastq_file) == 0)) { printf("The input and output files must not be the same file!\n"); usage(prog_name); return 1; } i = 0; k = BUF_SIZE; if (fasta_file != NULL) { qual = NULL; printf("Counting reads in file %s\n", fasta_file); f = fopen(fasta_file, "r"); if (f == NULL) { printf("Could not open file %s\n", fasta_file); abort(); } c = fgetc(f); while(c != EOF) { if (c != '>') { printf("Fasta file %s has invalid format\n", fasta_file); abort(); } num_reads++; while((c = fgetc(f)) != '\n' && c != EOF); while((c = fgetc(f)) != '>' && c != EOF); } printf("Found %ld reads\n", num_reads); if (indexed_reads < 0 || indexed_reads > num_reads) indexed_reads = num_reads; reads = (uchar **)malloc(num_reads * sizeof(uchar *)); read_length = (int *)malloc(num_reads * sizeof(int)); num_edits = (uchar *)malloc(num_reads * sizeof(uchar)); printf("Reading reads from file %s\n", fasta_file); /* Read the reads from the file */ f = fopen(fasta_file, "r"); if (f == NULL) { printf("Could not open file %s\n", fasta_file); abort(); } c = fgetc(f); while(c != EOF) { j = 0; line[j++] = c; while((c = fgetc(f)) != '\n' && c != EOF && j < BUF_SIZE) { line[j++] = c; } line[j] = '\0'; if (j >= BUF_SIZE || c == EOF || line[0] != '>') { printf("Fasta file %s 
has invalid format\n", fasta_file); abort(); } j = 0; while((c = fgetc(f)) != '>' && c != EOF && j < BUF_SIZE) { if (c != '\n') line[j++] = c; } line[j] = '\0'; if (j >= BUF_SIZE) { printf("Read length exceeds the maximum.\n"); abort(); } if (j+MAX_INSERTIONS+1+k > BUF_SIZE) { buf4reads = (uchar *)malloc(BUF_SIZE*sizeof(uchar)); if (buf4reads == NULL) { printf("Could not malloc space for reads\n"); abort(); } k = 0; } strncpy((char *)&buf4reads[k], line, j+1); reads[i] = &buf4reads[k]; read_length[i] = j; i++; k += j+1; for(ii = 0; ii < MAX_INSERTIONS; ii++) { buf4reads[k++] = '\0'; } #ifdef DEBUG_Q printf("Original: %s\n", reads[i-1]); #endif } fclose(f); printf("Found %ld reads\n", i); } else if (fastq_file != NULL) { printf("Counting reads in file %s\n", fastq_file); f = fopen(fastq_file, "r"); if (f == NULL) { printf("Could not open file %s\n", fastq_file); abort(); } c = fgetc(f); while(c != EOF) { if (c != '@') { printf("Fastq file %s has invalid format\n", fastq_file); abort(); } num_reads++; /*comment*/ while((c = fgetc(f)) != '\n' && c != EOF); /*read*/ while((c = fgetc(f)) != '\n' && c != EOF); /*comment*/ while((c = fgetc(f)) != '\n' && c != EOF); /*qualities*/ while((c = fgetc(f)) != '\n' && c != EOF); c = fgetc(f); } printf("Found %ld reads\n", num_reads); if (indexed_reads < 0 || indexed_reads > num_reads) indexed_reads = num_reads; reads = (uchar **)malloc(num_reads * sizeof(uchar *)); read_length = (int *)malloc(num_reads * sizeof(int)); num_edits = (uchar *)malloc(num_reads * sizeof(uchar)); qual = (char **)malloc(num_reads * sizeof(char *)); printf("Reading reads from file %s\n", fastq_file); /* Read the reads from the file */ f = fopen(fastq_file, "r"); if (f == NULL) { printf("Could not open file %s\n", fastq_file); return 1; } while((c = fgetc(f)) != EOF) { if (c != '@') { printf("Fastq file %s has invalid format\n", fastq_file); exit(1); } /* Skip read id */ while(c != '\n' && c != EOF) c = fgetc(f); /* Bases */ j = 0; while((c = fgetc(f)) != '\n') {if (c == EOF) break; line[j++] = c;} line[j] = '\0'; /* Skip the separating line */ if (fgetc(f) != '+') { printf("Fastq file %s has invalid format\n", fastq_file); exit(1); } while(fgetc(f) != '\n'); /* Qualities */ jj = 0; while((c = fgetc(f)) != '\n') { if (c == EOF) break; line2[jj++] = c-quality_offset; } line2[jj] = '\0'; if (j != jj) { printf("Length of bases and qualities not the same: %ld, %ld\n", j, jj); exit(1); } if (j+MAX_INSERTIONS+1+k > BUF_SIZE) { buf4reads = (uchar *)malloc(BUF_SIZE*sizeof(uchar)); buf4qual = (char *)malloc(BUF_SIZE*sizeof(char)); if (buf4reads == NULL || buf4qual == NULL) { printf("Could not malloc space for reads\n"); abort(); } k = 0; } strncpy((char *)&buf4reads[k], line, j+1); memcpy((char *)&buf4qual[k], line2, j+1); //strncpy((char *)&buf4qual[k], line2, j+1); reads[i] = &buf4reads[k]; qual[i] = &buf4qual[k]; read_length[i] = j; i++; k += j+1; for(ii = 0; ii < MAX_INSERTIONS; ii++) { buf4reads[k] = '\0'; buf4qual[k++] = '\0'; } #ifdef DEBUG_Q printf("Original: %s\n", reads[i-1]); printf(" "); for(jj= 0; jj < j; jj++) printf("%d ", qual[i-1][jj]); printf("\n"); #endif } fclose(f); printf("Found %ld reads\n", i); } else { usage(prog_name); abort(); } correct_errors(reads, read_length, qual, num_reads, q, max_grams, p, num_edits, max_error_rate, threshold, quality_threshold, max_aligned_reads, match_reward, mm_penalty, gap_penalty, stat_file, consensus_file, corrected_quality_scheme, corrected_quality, rebuilds, indexed_reads, min_correct_id); print_stats(); printf("Correction 
finished. Outputting corrected reads.\n"); if (fasta_file) { f = fopen(out_file, "w"); /* Open the original file to read the read names from there */ of = fopen(fasta_file, "r"); if (of == NULL) { printf("Could not open file %s\n", fasta_file); abort(); } c = fgetc(of); for(i = 0; i < num_reads; i++) { /* Read the comment line */ j = 0; line2[j++] = c; while((c = fgetc(of)) != '\n' && c != EOF && j < BUF_SIZE) { line2[j++] = c; } line2[j] = '\0'; if (j >= BUF_SIZE || c == EOF || line2[0] != '>') { printf("Fasta file %s has invalid format\n", fasta_file); abort(); } /* Skip the read */ while((c = fgetc(of)) != '>' && c != EOF); snprintf(line, BUF_SIZE, "%s ( %d edit operations)\n", line2, num_edits[i]); if (!fwrite(line, sizeof(char), strlen(line), f)) { printf("Could not write to output file\n"); } snprintf(line, BUF_SIZE, "%s\n", reads[i]); if (!fwrite(line, sizeof(char), strlen(line), f)) { printf("Could not write to output file\n"); } } fclose(f); } else if (fastq_file) { f = fopen(out_file, "w"); /* Open the original file to read the comments from there */ of = fopen(fastq_file, "r"); if (of == NULL) { printf("Could not open file %s\n", fastq_file); abort(); } c = fgetc(of); for(i = 0; i < num_reads; i++) { if (c != '@') { printf("Fastq file %s has invalid format\n", fastq_file); abort(); } /*comment*/ j = 0; line2[j++] = c; while((c = fgetc(of)) != '\n' && c != EOF) { line2[j++] = c; } line2[j] = '\0'; /*skip the read*/ while((c = fgetc(of)) != '\n' && c != EOF); snprintf(line, BUF_SIZE, "%s ( %d edit operations)\n", line2, num_edits[i]); if (!fwrite(line, sizeof(char), strlen(line), f)) { printf("Could not write to output file\n"); } snprintf(line, BUF_SIZE, "%s\n", reads[i]); if (!fwrite(line, sizeof(char), strlen(line), f)) { printf("Could not write to output file\n"); } /*comment*/ j = 0; while((c = fgetc(of)) != '\n' && c != EOF) { line2[j++] = c; } line2[j] = '\0'; /*skip qualities*/ while((c = fgetc(of)) != '\n' && c != EOF); c = fgetc(of); snprintf(line, BUF_SIZE, "%s\n", line2); if (!fwrite(line, sizeof(char), strlen(line), f)) { printf("Could not write to output file\n"); } for(j = 0; j < (signed)strlen((char *)reads[i]); j++) { line[j] = qual[i][j]+quality_offset; } line[strlen((char *)reads[i])] = '\n'; line[1+strlen((char *)reads[i])] = '\0'; if (!fwrite(line, sizeof(char), strlen(line), f)) { printf("Could not write to output file\n"); } } fclose(f); fclose(of); } return 0; }
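The read-scanning loop in correct_errors() above builds, for every window of q bases, a 2-bit packed representation of the q-gram and of its reverse complement, and uses the smaller of the two (the canonical form) as the key it looks up in the q-gram index. The stand-alone sketch below isolates that rolling update. It assumes the 2-bit base codes are CODE_A=0, CODE_C=1, CODE_G=2, CODE_T=3 (the real values live in the index headers, which are not reproduced here), and it simply prints the canonical code instead of querying an index.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { CODE_A = 0, CODE_C = 1, CODE_G = 2, CODE_T = 3 };  /* assumed 2-bit encoding */

/* Print the canonical 2-bit code of every complete q-gram of `read`
   (windows containing 'N' are skipped, as in the correction loop). */
static void scan_qgrams(const char *read, int q)
{
    const int bits = 2;
    uint64_t mask = ((uint64_t)1 << (bits * q)) - 1;
    uint64_t gram = 0, gram_reverse = 0;
    int lastN = -1;
    int len = (int)strlen(read);

    for (int j = 0; j < len; j++) {
        int code = -1;
        switch (read[j]) {
            case 'A': case 'a': code = CODE_A; break;
            case 'C': case 'c': code = CODE_C; break;
            case 'G': case 'g': code = CODE_G; break;
            case 'T': case 't': code = CODE_T; break;
            default:            lastN = j;     break;  /* 'N' or other character */
        }
        /* shift the new base into the forward gram; its complement (3 - code under
           this encoding) enters the reverse-complement gram from the high end */
        gram = ((gram << bits) | (uint64_t)(code < 0 ? 0 : code)) & mask;
        gram_reverse = ((gram_reverse >> bits) |
                        ((uint64_t)(code < 0 ? 0 : 3 - code) << (bits * (q - 1)))) & mask;

        if (j < q - 1 || lastN > j - q)
            continue;                          /* window incomplete or contains an N */
        uint64_t canonical = (gram_reverse < gram) ? gram_reverse : gram;
        printf("pos %d: canonical q-gram code %llu\n",
               j - q + 1, (unsigned long long)canonical);
    }
}

int main(void)
{
    scan_qgrams("ACGTNACGT", 3);
    return 0;
}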
circuitoOMP.c
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <omp.h>

void check_circuit (int id, int z);

int main (int argc, char *argv[])
{
  if (argc < 2) {
    fprintf(stderr, "usage: %s <number of threads>\n", argv[0]);
    return 1;
  }
  int numeroDeHilos = strtol(argv[1], NULL, 10);

  #pragma omp parallel num_threads(numeroDeHilos)
  {
    int i;  /* loop index must be private to each thread to avoid a data race */
    /* each thread checks every numeroDeHilos-th input of the 2^16 = 65536 combinations */
    for (i = omp_get_thread_num(); i < 65536; i += numeroDeHilos)
      check_circuit (omp_get_thread_num(), i);
    printf("Process %d is done\n", omp_get_thread_num());
    fflush (stdout);
  }
  return 0;
}

#define EXTRACT_BIT(n,i) ((n&(1<<i))?1:0)

void check_circuit (int id, int z)
{
  int v[16];
  int i;
  for (i = 0; i < 16; i++) v[i] = EXTRACT_BIT(z,i);
  if ((v[0] || v[1]) && (!v[1] || !v[3]) && (v[2] || v[3]) && (!v[3] || !v[4])
      && (v[4] || !v[5]) && (v[5] || !v[6]) && (v[5] || v[6]) && (v[6] || !v[15])
      && (v[7] || !v[8]) && (!v[7] || !v[13]) && (v[8] || v[9]) && (v[8] || !v[9])
      && (!v[9] || !v[10]) && (v[9] || v[11]) && (v[10] || v[11]) && (v[12] || v[13])
      && (v[13] || !v[14]) && (v[14] || v[15])) {
    printf("%d) %d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d\n", id,
           v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7],v[8],
           v[9],v[10],v[11],v[12],v[13],v[14],v[15]);
    fflush (stdout);
  }
}
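The same 2^16 search can also be distributed with an OpenMP worksharing loop instead of hand-rolled striding. The sketch below is a possible alternative main() for circuitoOMP.c: schedule(static,1) hands out iterations round-robin, much like the manual i += numeroDeHilos loop, and the loop variable is private to each thread by construction. It links against the check_circuit() defined above.

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

void check_circuit (int id, int z);  /* provided by circuitoOMP.c */

int main (int argc, char *argv[])
{
    if (argc < 2) {
        fprintf(stderr, "usage: %s <number of threads>\n", argv[0]);
        return 1;
    }
    int numeroDeHilos = (int)strtol(argv[1], NULL, 10);

    /* cyclic distribution of the 65536 input combinations over the threads */
    #pragma omp parallel for num_threads(numeroDeHilos) schedule(static,1)
    for (int i = 0; i < 65536; i++)
        check_circuit (omp_get_thread_num(), i);

    return 0;
}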
trace.c
#include "main.h" mat_rv trace_coo_nothreading(coo matrix) { mat_rv rv; if(matrix.rows != matrix.cols){ rv.error = ERR_WRONG_DIM; return rv; } coo result; result.type = matrix.type; result.cols = 1; result.rows = 1; result.length = 1; if(!(result.elems = (coo_elem*)malloc(1*sizeof(coo_elem)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } result.elems[0].type = matrix.type; result.elems[0].i = 0; result.elems[0].j = 0; if (result.elems[0].type == MAT_INT) result.elems[0].val.i = 0; else result.elems[0].val.f = 0.0; struct timespec start, end; get_utc_time(&start); for(int i = 0; i < matrix.length; ++i){ if(matrix.elems[i].i == matrix.elems[i].j){ if (result.elems[0].type == MAT_INT) result.elems[0].val.i += matrix.elems[i].val.i; else result.elems[0].val.f += matrix.elems[i].val.f; } } get_utc_time(&end); rv = coo_to_mat_nothreading(result); rv.t_process = time_delta(end, start); free_coo(result); return rv; } mat_rv trace_coo(coo matrix, int thread_count) { mat_rv rv; if(matrix.rows != matrix.cols){ rv.error = ERR_WRONG_DIM; return rv; } coo result; result.type = matrix.type; result.cols = 1; result.rows = 1; result.length = 1; if(!(result.elems = (coo_elem*)malloc(1*sizeof(coo_elem)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } result.elems[0].type = matrix.type; result.elems[0].i = 0; result.elems[0].j = 0; if (result.elems[0].type == MAT_INT) result.elems[0].val.i = 0; else result.elems[0].val.f = 0.0; struct timespec start, end; get_utc_time(&start); int i; #pragma omp parallel num_threads(thread_count) shared(result, matrix) { union{ int i; long double f; } local_result; MAT_TYPE local_type = result.type; #pragma omp for private(i) for(i = 0; i < matrix.length; ++i){ if(matrix.elems[i].i == matrix.elems[i].j){ if (local_type == MAT_INT) local_result.i += matrix.elems[i].val.i; else local_result.f += matrix.elems[i].val.f; } } #pragma omp critical { if(local_type == MAT_INT) result.elems[0].val.i += local_result.i; else result.elems[0].val.f += local_result.f; } } get_utc_time(&end); //add parallelism here later rv = coo_to_mat(result, thread_count); rv.t_process = time_delta(end, start); free_coo(result); return rv; } mat_rv trace_csr_nothreading(csr matrix) { mat_rv rv; if(matrix.rows != matrix.cols){ rv.error = ERR_WRONG_DIM; return rv; } struct timespec start, end; get_utc_time(&start); csr result; result.type = matrix.type; result.rows = 1; result.cols = 1; result.num_vals = 1; if(result.type == MAT_INT){ if(!(result.nnz.i = (int*)malloc(1*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } else{ if(!(result.nnz.f = (long double*)malloc(1*sizeof(long double)))){ fprintf(stderr, "Ran out of virtual memory while allo0cating result matrix\n"); exit(EXIT_FAILURE); } } if(!(result.ja = (int*)malloc(1*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } if(!(result.ia = (int*)malloc(2*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } result.ja[0] = 0; result.ia[0] = 0; result.ia[1] = 1; if(result.type == MAT_INT) result.nnz.i[0] = 0; else result.nnz.f[0] = 0.0; for(int i = 0; i < matrix.rows; ++i){ int j = matrix.ia[i]; while(j < matrix.ia[i + 1] && matrix.ja[j] < i) ++j; if(matrix.ja[j] == i){ if(result.type == MAT_INT) result.nnz.i[0] += matrix.nnz.i[j]; else result.nnz.f[0] 
+= matrix.nnz.f[j]; } } get_utc_time(&end); rv = csr_to_mat_nothreading(result); rv.t_process = time_delta(end, start); free_csr(result); return rv; } mat_rv trace_csr(csr matrix, int thread_count) { mat_rv rv; if(matrix.rows != matrix.cols){ rv.error = ERR_WRONG_DIM; return rv; } struct timespec start, end; get_utc_time(&start); csr result; result.type = matrix.type; result.rows = 1; result.cols = 1; result.num_vals = 1; if(result.type == MAT_INT){ if(!(result.nnz.i = (int*)malloc(1*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } } else{ if(!(result.nnz.f = (long double*)malloc(1*sizeof(long double)))){ fprintf(stderr, "Ran out of virtual memory while allo0cating result matrix\n"); exit(EXIT_FAILURE); } } if(!(result.ja = (int*)malloc(1*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } if(!(result.ia = (int*)malloc(2*sizeof(int)))){ fprintf(stderr, "Ran out of virtual memory while allocating result matrix\n"); exit(EXIT_FAILURE); } result.ja[0] = 0; result.ia[0] = 0; result.ia[1] = 1; if(result.type == MAT_INT) result.nnz.i[0] = 0; else result.nnz.f[0] = 0.0; int i; #pragma omp parallel num_threads(thread_count) shared(matrix, result) { //store result type to not have every thread requesting this resource MAT_TYPE local_type = result.type; union{ long double f; int i; } local_result; if(local_type == MAT_INT) local_result.i = 0; else local_result.f = 0.0; #pragma omp for private(i) for(i = 0; i < matrix.rows; ++i){ int j = matrix.ia[i]; while(j < matrix.ia[i + 1] && matrix.ja[j] < i) ++j; if(matrix.ja[j] == i){ if(local_type == MAT_INT) local_result.i += matrix.nnz.i[j]; else local_result.f += matrix.nnz.f[j]; } } #pragma omp critical { if(local_type == MAT_INT) result.nnz.i[0] += local_result.i; else result.nnz.f[0] += local_result.f; } } get_utc_time(&end); rv = csr_to_mat(result, thread_count); rv.t_process = time_delta(end, start); free_csr(result); return rv; } //identical behaviour for square matrices (as assumed otherwise errors) mat_rv trace_csc_nothreading(csc matrix) { return trace_csr_nothreading(matrix); } mat_rv trace_csc(csc matrix, int thread_count) { return trace_csr(matrix, thread_count); } //using trace to build every format initially mat_rv trace(OPERATIONARGS *args) { mat_rv rv; //default = COO if (args->format == FORM_DEFAULT) args->format = COO; switch(args->format){ case COO:{ struct timespec construct; struct timespec fileio_timer; coo matrix = read_coo(args->file1, &construct, &fileio_timer); if(args->nothreading) rv = trace_coo_nothreading(matrix); else rv = trace_coo(matrix, args->num_threads); rv.t_construct = time_sum(rv.t_construct, construct); rv.t_fileio = fileio_timer; free_coo(matrix); rv.isval = true; return rv; break; } case CSR:{ struct timespec construct; struct timespec fileio_timer; csr matrix = read_csr(args->file1, &construct, &fileio_timer); if(args->nothreading) rv = trace_csr_nothreading(matrix); else rv = trace_csr(matrix, args->num_threads); rv.t_construct = time_sum(rv.t_construct, construct); rv.t_fileio = fileio_timer; free_csr(matrix); rv.isval = true; return rv; break; } case CSC:{ struct timespec construct; struct timespec fileio_timer; csc matrix = read_csc(args->file1, &construct, &fileio_timer); if(args->nothreading) rv = trace_csc_nothreading(matrix); else rv = trace_csc(matrix, args->num_threads); rv.t_construct = time_sum(rv.t_construct, construct); rv.t_fileio = fileio_timer; free_csc(matrix); 
rv.isval = true; return rv; break; } default: fprintf(stderr, "format not implemented\n"); exit(EXIT_FAILURE); break; } //execution should never reach here rv.error = ERR_NOT_SET; return rv; }
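trace_coo() and trace_csr() above give each thread a private partial sum and merge the partials inside an omp critical block, because the accumulated value may be either an int or a long double depending on the matrix type. When the value type is fixed, the same pattern can be written more compactly with an OpenMP reduction clause. The sketch below uses a hypothetical diag_elem type as a stand-in for the coo/csr structures declared in main.h, which are not reproduced here.

#include <stdio.h>
#include <omp.h>

typedef struct { int i, j; long double val; } diag_elem;   /* hypothetical element type */

static long double trace_reduction(const diag_elem *elems, int length, int thread_count)
{
    long double sum = 0.0L;
    /* each thread accumulates into its own private copy of sum;
       OpenMP combines the copies when the loop ends */
    #pragma omp parallel for num_threads(thread_count) reduction(+:sum)
    for (int k = 0; k < length; k++) {
        if (elems[k].i == elems[k].j)       /* only diagonal entries contribute to the trace */
            sum += elems[k].val;
    }
    return sum;
}

int main(void)
{
    diag_elem m[] = { {0,0,1.5L}, {0,1,2.0L}, {1,1,2.5L}, {2,2,3.0L} };
    printf("trace = %Lf\n", trace_reduction(m, 4, 2));      /* prints 7.000000 */
    return 0;
}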
perturbations.c
/** @file perturbations.c Documented perturbation module * * Julien Lesgourgues, 23.09.2010 * * Deals with the perturbation evolution. * This module has two purposes: * * - at the beginning; to initialize the perturbations, i.e. to * integrate the perturbation equations, and store temporarily the terms * contributing to the source functions as a function of conformal * time. Then, to perform a few manipulations of these terms in order to * infer the actual source functions \f$ S^{X} (k, \tau) \f$, and to * store them as a function of conformal time inside an interpolation * table. * * - at any time in the code; to evaluate the source functions at a * given conformal time (by interpolating within the interpolation * table). * * Hence the following functions can be called from other modules: * * -# perturb_init() at the beginning (but after background_init() and thermodynamics_init()) * -# perturb_sources_at_tau() at any later time * -# perturb_free() at the end, when no more calls to perturb_sources_at_tau() are needed */ #include "perturbations.h" /** * Source function \f$ S^{X} (k, \tau) \f$ at a given conformal time tau. * * Evaluate source functions at given conformal time tau by reading * the pre-computed table and interpolating. * * @param ppt Input: pointer to perturbation structure containing interpolation tables * @param index_md Input: index of requested mode * @param index_ic Input: index of requested initial condition * @param index_type Input: index of requested source function type * @param tau Input: any value of conformal time * @param psource Output: vector (already allocated) of source function as a function of k * @return the error status */ int perturb_sources_at_tau( struct perturbs * ppt, int index_md, int index_ic, int index_type, double tau, double * psource ) { /** Summary: */ /** - interpolate in pre-computed table contained in ppt */ class_call(array_interpolate_two_bis(ppt->tau_sampling, 1, 0, ppt->sources[index_md][index_ic*ppt->tp_size[index_md]+index_type], ppt->k_size[index_md], ppt->tau_size, tau, psource, ppt->k_size[index_md], ppt->error_message), ppt->error_message, ppt->error_message); return _SUCCESS_; } /** * Initialize the perturbs structure, and in particular the table of source functions. * * Main steps: * * - given the values of the flags describing which kind of * perturbations should be considered (modes: scalar/vector/tensor, * initial conditions, type of source functions needed...), * initialize indices and wavenumber list * * - define the time sampling for the output source functions * * - for each mode (scalar/vector/tensor): initialize the indices of * relevant perturbations, integrate the differential system, * compute and store the source functions. 
* * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to thermodynamics structure * @param ppt Output: Initialized perturbation structure * @return the error status */ int perturb_init( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt ) { /** Summary: */ /** - define local variables */ /* running index for modes */ int index_md; /* running index for initial conditions */ int index_ic; /* running index for wavenumbers */ int index_k; /* pointer to one struct perturb_workspace per thread (one if no openmp) */ struct perturb_workspace ** pppw; /* number of threads (always one if no openmp) */ int number_of_threads=1; /* index of the thread (always 0 if no openmp) */ int thread=0; /* This code can be optionally compiled with the openmp option for parallel computation. Inside parallel regions, the use of the command "return" is forbidden. For error management, instead of "return _FAILURE_", we will set the variable below to "abort = _TRUE_". This will lead to a "return _FAILURE_" just after leaving the parallel region. */ int abort; /* unsigned integer that will be set to the size of the workspace */ size_t sz; #ifdef _OPENMP /* instrumentation times */ double tstart, tstop, tspent; #endif /** - perform preliminary checks */ if (ppt->has_perturbations == _FALSE_) { if (ppt->perturbations_verbose > 0) printf("No sources requested. Perturbation module skipped.\n"); return _SUCCESS_; } else { if (ppt->perturbations_verbose > 0) printf("Computing sources\n"); } class_test((ppt->gauge == synchronous) && (pba->has_cdm == _FALSE_), ppt->error_message, "In the synchronous gauge, it is not self-consistent to assume no CDM: the later is used to define the initial timelike hypersurface. You can either add a negligible amount of CDM or switch to newtonian gauge"); class_test ((ppr->tight_coupling_approximation < first_order_MB) || (ppr->tight_coupling_approximation > compromise_CLASS), ppt->error_message, "your tight_coupling_approximation is set to %d, out of range defined in perturbations.h",ppr->tight_coupling_approximation); class_test ((ppr->radiation_streaming_approximation < rsa_null) || (ppr->radiation_streaming_approximation > rsa_none), ppt->error_message, "your radiation_streaming_approximation is set to %d, out of range defined in perturbations.h",ppr->radiation_streaming_approximation); if (pba->has_ur == _TRUE_) { class_test ((ppr->ur_fluid_approximation < ufa_mb) || (ppr->ur_fluid_approximation > ufa_none), ppt->error_message, "your ur_fluid_approximation is set to %d, out of range defined in perturbations.h",ppr->ur_fluid_approximation); } if (pba->has_ncdm == _TRUE_) { class_test ((ppr->ncdm_fluid_approximation < ncdmfa_mb) || (ppr->ncdm_fluid_approximation > ncdmfa_none), ppt->error_message, "your ncdm_fluid_approximation is set to %d, out of range defined in perturbations.h",ppr->ncdm_fluid_approximation); } if (pba->has_fld == _TRUE_) { class_test(pba->w0_fld+pba->wa_fld >= 0., ppt->error_message, "So far, the fluid is meant to be negligible at early time, and not to be important for defining the initial conditions of other species. You are using parameters for which this assumption may break down, so maybe it's the case to fully implement the fluid in the initial condition routine"); class_test((pba->w0_fld==-1.) 
&& (pba->wa_fld==0.), ppt->error_message, "Your choice of a fluid with (w0,wa)=(-1,0) is not valid due to instabilities in the unphysical perturbations of such a fluid. Try instead with a plain cosmological constant"); class_test(((pba->w0_fld + pba->wa_fld +1.0)*(pba->w0_fld+1.0)) < 0.0, ppt->error_message, "w crosses -1 between the infinite past and today, and this would lead to divergent perturbation equations for the fluid."); } if (pba->has_dcdm == _TRUE_) { class_test((ppt->has_cdi == _TRUE_) || (ppt->has_bi == _TRUE_) || (ppt->has_nid == _TRUE_) || (ppt->has_niv == _TRUE_), ppt->error_message, "Non-adiabatic initial conditions not coded in presence of decaying dark matter"); } class_test(ppt->has_vectors == _TRUE_, ppt->error_message, "Vectors not coded yet"); if ((ppt->has_niv == _TRUE_) && (ppt->perturbations_verbose > 0)) { printf("Warning: the niv initial conditions in CLASS (and also in CAMB) should still be double-checked: if you want to do it and send feedback, you are welcome!\n"); } if (ppt->has_tensors == _TRUE_) { ppt->evolve_tensor_ur = _FALSE_; ppt->evolve_tensor_ncdm = _FALSE_; switch (ppt->tensor_method) { case (tm_photons_only): break; case (tm_massless_approximation): if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_)) ppt->evolve_tensor_ur = _TRUE_; break; case (tm_exact): if (pba->has_ur == _TRUE_) ppt->evolve_tensor_ur = _TRUE_; if (pba->has_ncdm == _TRUE_) ppt->evolve_tensor_ncdm = _TRUE_; break; } } /** - initialize all indices and lists in perturbs structure using perturb_indices_of_perturbs() */ class_call(perturb_indices_of_perturbs(ppr, pba, pth, ppt), ppt->error_message, ppt->error_message); if (ppt->z_max_pk > pth->z_rec) { class_test(ppt->has_cmb == _TRUE_, ppt->error_message, "You requested a very high z_pk=%e, higher than z_rec=%e. This works very well when you don't ask for a calculation of the CMB source function(s). Remove any CMB from your output and try e.g. with 'output=mTk' or 'output=mTk,vTk'", ppt->z_max_pk, pth->z_rec); class_test(ppt->has_source_delta_m == _TRUE_, ppt->error_message, "You requested a very high z_pk=%e, higher than z_rec=%e. This works very well when you ask only transfer functions, e.g. with 'output=mTk' or 'output=mTk,vTk'. But if you need the total matter (e.g. with 'mPk', 'dCl', etc.) there is an issue with the calculation of delta_m at very early times. By default, delta_m is a gauge-invariant variable (the density fluctuation in comoving gauge) and this quantity is hard to get accurately at very early times. The solution is to define delta_m as the density fluctuation in the current gauge, synchronous or newtonian. For the moment this must be done manually by commenting the line 'ppw->delta_m += 3. *ppw->pvecback[pba->index_bg_a]*ppw->pvecback[pba->index_bg_H] * ppw->theta_m/k2;' in perturb_sources(). 
In the future there will be an option for doing it in an easier way.", ppt->z_max_pk, pth->z_rec); } /** - define the common time sampling for all sources using perturb_timesampling_for_sources() */ class_call(perturb_timesampling_for_sources(ppr, pba, pth, ppt), ppt->error_message, ppt->error_message); /** - if we want to store perturbations, write titles and allocate storage */ class_call(perturb_prepare_output(pba,ppt), ppt->error_message, ppt->error_message); /** - create an array of workspaces in multi-thread case */ #ifdef _OPENMP #pragma omp parallel { number_of_threads = omp_get_num_threads(); } #endif class_alloc(pppw,number_of_threads * sizeof(struct perturb_workspace *),ppt->error_message); /** - loop over modes (scalar, tensors, etc). For each mode: */ for (index_md = 0; index_md < ppt->md_size; index_md++) { if (ppt->perturbations_verbose > 1) printf("Evolving mode %d/%d\n",index_md+1,ppt->md_size); abort = _FALSE_; sz = sizeof(struct perturb_workspace); #pragma omp parallel \ shared(pppw,ppr,pba,pth,ppt,index_md,abort,number_of_threads) \ private(thread) \ num_threads(number_of_threads) { #ifdef _OPENMP thread=omp_get_thread_num(); #endif /** - --> (a) create a workspace (one per thread in multi-thread case) */ class_alloc_parallel(pppw[thread],sz,ppt->error_message); /** - --> (b) initialize indices of vectors of perturbations with perturb_indices_of_current_vectors() */ class_call_parallel(perturb_workspace_init(ppr, pba, pth, ppt, index_md, pppw[thread]), ppt->error_message, ppt->error_message); } /* end of parallel region */ if (abort == _TRUE_) return _FAILURE_; /** - --> (c) loop over initial conditions and wavenumbers; for each of them, evolve perturbations and compute source functions with perturb_solve() */ for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) { if (ppt->perturbations_verbose > 1) printf("Evolving ic %d/%d\n",index_ic+1,ppt->ic_size[index_md]); if (ppt->perturbations_verbose > 1) printf("evolving %d wavenumbers\n",ppt->k_size[index_md]); abort = _FALSE_; #pragma omp parallel \ shared(pppw,ppr,pba,pth,ppt,index_md,index_ic,abort,number_of_threads) \ private(index_k,thread,tstart,tstop,tspent) \ num_threads(number_of_threads) { #ifdef _OPENMP thread=omp_get_thread_num(); tspent=0.; #endif #pragma omp for schedule (dynamic) /* integrating backwards is slightly more optimal for parallel runs */ //for (index_k = 0; index_k < ppt->k_size; index_k++) { for (index_k = ppt->k_size[index_md]-1; index_k >=0; index_k--) { if ((ppt->perturbations_verbose > 2) && (abort == _FALSE_)) { printf("evolving mode k=%e /Mpc (%d/%d)",ppt->k[index_md][index_k],index_k+1,ppt->k_size[index_md]); if (pba->sgnK != 0) printf(" (for scalar modes, corresponds to nu=%e)",sqrt(ppt->k[index_md][index_k]*ppt->k[index_md][index_k]+pba->K)/sqrt(pba->sgnK*pba->K)); printf("\n"); } #ifdef _OPENMP tstart = omp_get_wtime(); #endif class_call_parallel(perturb_solve(ppr, pba, pth, ppt, index_md, index_ic, index_k, pppw[thread]), ppt->error_message, ppt->error_message); #ifdef _OPENMP tstop = omp_get_wtime(); tspent += tstop-tstart; #endif #pragma omp flush(abort) } /* end of loop over wavenumbers */ #ifdef _OPENMP if (ppt->perturbations_verbose>1) printf("In %s: time spent in parallel region (loop over k's) = %e s for thread %d\n", __func__,tspent,omp_get_thread_num()); #endif } /* end of parallel region */ if (abort == _TRUE_) return _FAILURE_; } /* end of loop over initial conditions */ abort = _FALSE_; #pragma omp parallel \ shared(pppw,ppt,index_md,abort,number_of_threads) \ 
private(thread) \ num_threads(number_of_threads) { #ifdef _OPENMP thread=omp_get_thread_num(); #endif class_call_parallel(perturb_workspace_free(ppt,index_md,pppw[thread]), ppt->error_message, ppt->error_message); } /* end of parallel region */ if (abort == _TRUE_) return _FAILURE_; } /* end loop over modes */ free(pppw); return _SUCCESS_; } /** * Free all memory space allocated by perturb_init(). * * To be called at the end of each run, only when no further calls to * perturb_sources_at_tau() are needed. * * @param ppt Input: perturbation structure to be freed * @return the error status */ int perturb_free( struct perturbs * ppt ) { int index_md,index_ic,index_type; int filenum; if (ppt->has_perturbations == _TRUE_) { for (index_md = 0; index_md < ppt->md_size; index_md++) { for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) { for (index_type = 0; index_type < ppt->tp_size[index_md]; index_type++) { free(ppt->sources[index_md][index_ic*ppt->tp_size[index_md]+index_type]); } } free(ppt->sources[index_md]); free(ppt->k[index_md]); } free(ppt->tau_sampling); free(ppt->tp_size); free(ppt->ic_size); free(ppt->k); free(ppt->k_size_cmb); free(ppt->k_size_cl); free(ppt->k_size); free(ppt->sources); /** Stuff related to perturbations output: */ /** - Free non-NULL pointers */ if (ppt->index_k_output_values != NULL) free(ppt->index_k_output_values); for (filenum = 0; filenum<_MAX_NUMBER_OF_K_FILES_; filenum++){ if (ppt->scalar_perturbations_data[filenum] != NULL) free(ppt->scalar_perturbations_data[filenum]); if (ppt->vector_perturbations_data[filenum] != NULL) free(ppt->vector_perturbations_data[filenum]); if (ppt->tensor_perturbations_data[filenum] != NULL) free(ppt->tensor_perturbations_data[filenum]); } } return _SUCCESS_; } /** * Initialize all indices and allocate most arrays in perturbs structure. 
* * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to thermodynamics structure * @param ppt Input/Output: Initialized perturbation structure * @return the error status */ int perturb_indices_of_perturbs( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt ) { /** Summary: */ /** - define local variables */ int index_type; int index_md; int index_ic; int index_type_common; /** - count modes (scalar, vector, tensor) and assign corresponding indices */ index_md = 0; class_define_index(ppt->index_md_scalars,ppt->has_scalars,index_md,1); class_define_index(ppt->index_md_vectors,ppt->has_vectors,index_md,1); class_define_index(ppt->index_md_tensors,ppt->has_tensors,index_md,1); ppt->md_size = index_md; class_test(index_md == 0, ppt->error_message, "you should have at least one out of {scalars, vectors, tensors} !!!"); /** - allocate array of number of types for each mode, ppt->tp_size[index_md] */ class_alloc(ppt->tp_size,ppt->md_size*sizeof(int),ppt->error_message); /** - allocate array of number of initial conditions for each mode, ppt->ic_size[index_md] */ class_alloc(ppt->ic_size,ppt->md_size*sizeof(int),ppt->error_message); /** - allocate array of arrays of source functions for each mode, ppt->source[index_md] */ class_alloc(ppt->sources,ppt->md_size * sizeof(double *),ppt->error_message); /** - initialization of all flags to false (will eventually be set to true later) */ ppt->has_cmb = _FALSE_; ppt->has_lss = _FALSE_; ppt->has_source_t = _FALSE_; ppt->has_source_p = _FALSE_; ppt->has_source_delta_m = _FALSE_; ppt->has_source_delta_g = _FALSE_; ppt->has_source_delta_b = _FALSE_; ppt->has_source_delta_cdm = _FALSE_; ppt->has_source_delta_dcdm = _FALSE_; ppt->has_source_delta_fld = _FALSE_; ppt->has_source_delta_scf = _FALSE_; ppt->has_source_delta_dr = _FALSE_; ppt->has_source_delta_ur = _FALSE_; ppt->has_source_delta_ncdm = _FALSE_; ppt->has_source_theta_m = _FALSE_; ppt->has_source_theta_g = _FALSE_; ppt->has_source_theta_b = _FALSE_; ppt->has_source_theta_cdm = _FALSE_; ppt->has_source_theta_dcdm = _FALSE_; ppt->has_source_theta_fld = _FALSE_; ppt->has_source_theta_scf = _FALSE_; ppt->has_source_theta_dr = _FALSE_; ppt->has_source_theta_ur = _FALSE_; ppt->has_source_theta_ncdm = _FALSE_; ppt->has_source_phi = _FALSE_; ppt->has_source_phi_prime = _FALSE_; ppt->has_source_phi_plus_psi = _FALSE_; ppt->has_source_psi = _FALSE_; /** - source flags and indices, for sources that all modes have in common (temperature, polarization, ...). For temperature, the term t2 is always non-zero, while other terms are non-zero only for scalars and vectors. For polarization, the term e is always non-zero, while the term b is only for vectors and tensors. 
*/ if (ppt->has_cl_cmb_temperature == _TRUE_) { ppt->has_source_t = _TRUE_; ppt->has_cmb = _TRUE_; } if (ppt->has_cl_cmb_polarization == _TRUE_) { ppt->has_source_p = _TRUE_; ppt->has_cmb = _TRUE_; } index_type = 0; class_define_index(ppt->index_tp_t2,ppt->has_source_t,index_type,1); class_define_index(ppt->index_tp_p,ppt->has_source_p,index_type,1); index_type_common = index_type; /* indices for perturbed recombination */ class_define_index(ppt->index_tp_perturbed_recombination_delta_temp,ppt->has_perturbed_recombination,index_type,1); class_define_index(ppt->index_tp_perturbed_recombination_delta_chi,ppt->has_perturbed_recombination,index_type,1); /** - define k values with perturb_get_k_list() */ class_call(perturb_get_k_list(ppr, pba, pth, ppt), ppt->error_message, ppt->error_message); /** - loop over modes. Initialize flags and indices which are specific to each mode. */ for (index_md = 0; index_md < ppt->md_size; index_md++) { /** - (a) scalars */ if (_scalars_) { /** - --> source flags and indices, for sources that are specific to scalars */ if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) || (ppt->has_cl_lensing_potential)) { ppt->has_lss = _TRUE_; ppt->has_source_phi_plus_psi = _TRUE_; } if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_nl_corrections_based_on_delta_m)) { ppt->has_lss = _TRUE_; ppt->has_source_delta_m = _TRUE_; } if (ppt->has_density_transfers == _TRUE_) { ppt->has_lss = _TRUE_; ppt->has_source_delta_g = _TRUE_; ppt->has_source_delta_b = _TRUE_; if (pba->has_cdm == _TRUE_) ppt->has_source_delta_cdm = _TRUE_; if (pba->has_dcdm == _TRUE_) ppt->has_source_delta_dcdm = _TRUE_; if (pba->has_fld == _TRUE_) ppt->has_source_delta_fld = _TRUE_; if (pba->has_scf == _TRUE_) ppt->has_source_delta_scf = _TRUE_; if (pba->has_ur == _TRUE_) ppt->has_source_delta_ur = _TRUE_; if (pba->has_dr == _TRUE_) ppt->has_source_delta_dr = _TRUE_; if (pba->has_ncdm == _TRUE_) ppt->has_source_delta_ncdm = _TRUE_; // Thanks to the following lines, (phi,psi) are also stored as sources // (Obtained directly in newtonian gauge, infereed from (h,eta) in synchronous gauge). // If density transfer functions are requested in the (default) CLASS format, // (phi, psi) will be appended to the delta_i's in the final output. 
ppt->has_source_phi = _TRUE_; ppt->has_source_psi = _TRUE_; } if (ppt->has_velocity_transfers == _TRUE_) { ppt->has_lss = _TRUE_; ppt->has_source_theta_g = _TRUE_; ppt->has_source_theta_b = _TRUE_; if ((pba->has_cdm == _TRUE_) && (ppt->gauge != synchronous)) ppt->has_source_theta_cdm = _TRUE_; if (pba->has_dcdm == _TRUE_) ppt->has_source_theta_dcdm = _TRUE_; if (pba->has_fld == _TRUE_) ppt->has_source_theta_fld = _TRUE_; if (pba->has_scf == _TRUE_) ppt->has_source_theta_scf = _TRUE_; if (pba->has_ur == _TRUE_) ppt->has_source_theta_ur = _TRUE_; if (pba->has_dr == _TRUE_) ppt->has_source_theta_dr = _TRUE_; if (pba->has_ncdm == _TRUE_) ppt->has_source_theta_ncdm = _TRUE_; } if (ppt->has_cl_number_count == _TRUE_) { ppt->has_lss = _TRUE_; if (ppt->has_nc_density == _TRUE_) { ppt->has_source_delta_m = _TRUE_; } if (ppt->has_nc_rsd == _TRUE_) { ppt->has_source_theta_m = _TRUE_; } if (ppt->has_nc_lens == _TRUE_) { ppt->has_source_phi_plus_psi = _TRUE_; } if (ppt->has_nc_gr == _TRUE_) { ppt->has_source_phi = _TRUE_; ppt->has_source_psi = _TRUE_; ppt->has_source_phi_prime = _TRUE_; ppt->has_source_phi_plus_psi = _TRUE_; } } index_type = index_type_common; class_define_index(ppt->index_tp_t0, ppt->has_source_t, index_type,1); class_define_index(ppt->index_tp_t1, ppt->has_source_t, index_type,1); class_define_index(ppt->index_tp_delta_m, ppt->has_source_delta_m, index_type,1); class_define_index(ppt->index_tp_delta_g, ppt->has_source_delta_g, index_type,1); class_define_index(ppt->index_tp_delta_b, ppt->has_source_delta_b, index_type,1); class_define_index(ppt->index_tp_delta_cdm, ppt->has_source_delta_cdm, index_type,1); class_define_index(ppt->index_tp_delta_dcdm, ppt->has_source_delta_dcdm,index_type,1); class_define_index(ppt->index_tp_delta_fld, ppt->has_source_delta_fld, index_type,1); class_define_index(ppt->index_tp_delta_scf, ppt->has_source_delta_scf, index_type,1); class_define_index(ppt->index_tp_delta_dr, ppt->has_source_delta_dr, index_type,1); class_define_index(ppt->index_tp_delta_ur, ppt->has_source_delta_ur, index_type,1); class_define_index(ppt->index_tp_delta_ncdm1,ppt->has_source_delta_ncdm,index_type,pba->N_ncdm); class_define_index(ppt->index_tp_theta_m, ppt->has_source_theta_m, index_type,1); class_define_index(ppt->index_tp_theta_g, ppt->has_source_theta_g, index_type,1); class_define_index(ppt->index_tp_theta_b, ppt->has_source_theta_b, index_type,1); class_define_index(ppt->index_tp_theta_cdm, ppt->has_source_theta_cdm, index_type,1); class_define_index(ppt->index_tp_theta_dcdm, ppt->has_source_theta_dcdm,index_type,1); class_define_index(ppt->index_tp_theta_fld, ppt->has_source_theta_fld, index_type,1); class_define_index(ppt->index_tp_theta_scf, ppt->has_source_theta_scf, index_type,1); class_define_index(ppt->index_tp_theta_dr, ppt->has_source_theta_dr, index_type,1); class_define_index(ppt->index_tp_theta_ur, ppt->has_source_theta_ur, index_type,1); class_define_index(ppt->index_tp_theta_ncdm1,ppt->has_source_theta_ncdm,index_type,pba->N_ncdm); class_define_index(ppt->index_tp_phi, ppt->has_source_phi, index_type,1); class_define_index(ppt->index_tp_phi_prime, ppt->has_source_phi_prime, index_type,1); class_define_index(ppt->index_tp_phi_plus_psi,ppt->has_source_phi_plus_psi,index_type,1); class_define_index(ppt->index_tp_psi, ppt->has_source_psi, index_type,1); ppt->tp_size[index_md] = index_type; class_test(index_type == 0, ppt->error_message, "inconsistent input: you asked for scalars, so you should have at least one non-zero scalar source type (temperature, 
polarization, lensing/gravitational potential, ...). Please adjust your input."); /** - --> count scalar initial conditions (for scalars: ad, cdi, nid, niv; for tensors: only one) and assign corresponding indices */ index_ic = 0; class_define_index(ppt->index_ic_ad, ppt->has_ad, index_ic,1); class_define_index(ppt->index_ic_bi, ppt->has_bi, index_ic,1); class_define_index(ppt->index_ic_cdi,ppt->has_cdi,index_ic,1); class_define_index(ppt->index_ic_nid,ppt->has_nid,index_ic,1); class_define_index(ppt->index_ic_niv,ppt->has_niv,index_ic,1); ppt->ic_size[index_md] = index_ic; class_test(index_ic == 0, ppt->error_message, "you should have at least one adiabatic or isocurvature initial condition...} !!!"); } /** - (b) vectors */ if (_vectors_) { /** - --> source flags and indices, for sources that are specific to vectors */ index_type = index_type_common; class_define_index(ppt->index_tp_t1,ppt->has_source_t,index_type,1); ppt->tp_size[index_md] = index_type; /* class_test(index_type == 0, ppt->error_message, "inconsistent input: you asked for vectors, so you should have at least one non-zero vector source type (temperature or polarization). Please adjust your input."); */ /** - --> initial conditions for vectors*/ index_ic = 0; /* not coded yet */ ppt->ic_size[index_md] = index_ic; } /** - (c) tensors */ if (_tensors_) { /** - --> source flags and indices, for sources that are specific to tensors */ index_type = index_type_common; /* nothing specific, unlike for vectors and scalars! */ ppt->tp_size[index_md] = index_type; /* class_test(index_type == 0, ppt->error_message, "inconsistent input: you asked for tensors, so you should have at least one non-zero tensor source type (temperature or polarization). Please adjust your input."); */ /** - --> only one initial condition for tensors*/ index_ic = 0; class_define_index(ppt->index_ic_ten,_TRUE_,index_ic,1); ppt->ic_size[index_md] = index_ic; } /** - (d) for each mode, allocate array of arrays of source functions for each initial conditions and wavenumber, (ppt->source[index_md])[index_ic][index_type] */ class_alloc(ppt->sources[index_md], ppt->ic_size[index_md] * ppt->tp_size[index_md] * sizeof(double *), ppt->error_message); } return _SUCCESS_; } /** * Define time sampling for source functions. * * For each type, compute the list of values of tau at which sources * will be sampled. Knowing the number of tau values, allocate all * arrays of source functions. 
* * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to thermodynamics structure * @param ppt Input/Output: Initialized perturbation structure * @return the error status */ int perturb_timesampling_for_sources( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt ) { /** Summary: */ /** - define local variables */ int counter; int index_md; int index_type; int index_ic; int last_index_back; int last_index_thermo; int first_index_back; int first_index_thermo; double tau; double tau_ini; double tau_lower; double tau_upper; double tau_mid; double timescale_source; double rate_thermo; double rate_isw_squared; double a_prime_over_a; double a_primeprime_over_a; double * pvecback; double * pvecthermo; /** - allocate background/thermodynamics vectors */ class_alloc(pvecback,pba->bg_size_short*sizeof(double),ppt->error_message); class_alloc(pvecthermo,pth->th_size*sizeof(double),ppt->error_message); /** - first, just count the number of sampling points in order to allocate the array containing all values */ /** - (a) if CMB requested, first sampling point = when the universe stops being opaque; otherwise, start sampling gravitational potential at recombination [however, if perturbed recombination is requested, we also need to start the system before recombination. Otherwise, the initial conditions for gas temperature and ionization fraction perturbations (delta_T = 1/3 delta_b, delta_x_e) are not valid]. */ if ((ppt->has_cmb == _TRUE_)||(ppt->has_perturbed_recombination == _TRUE_)) { /* using bisection, search time tau such that the ratio of thermo to Hubble time scales tau_c/tau_h=aH/kappa' is equal to start_sources_at_tau_c_over_tau_h */ tau_lower = pth->tau_ini; class_call(background_at_tau(pba, tau_lower, pba->short_info, pba->inter_normal, &first_index_back, pvecback), pba->error_message, ppt->error_message); class_call(thermodynamics_at_z(pba, pth, 1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ pth->inter_normal, &first_index_thermo, pvecback, pvecthermo), pth->error_message, ppt->error_message); class_test(pvecback[pba->index_bg_a]* pvecback[pba->index_bg_H]/ pvecthermo[pth->index_th_dkappa] > ppr->start_sources_at_tau_c_over_tau_h, ppt->error_message, "your choice of initial time for computing sources is inappropriate: it corresponds to an earlier time than the one at which the integration of thermodynamical variables started (tau=%g). You should increase either 'start_sources_at_tau_c_over_tau_h' or 'recfast_z_initial'\n", tau_lower); tau_upper = pth->tau_rec; class_call(background_at_tau(pba, tau_upper, pba->short_info, pba->inter_normal, &first_index_back, pvecback), pba->error_message, ppt->error_message); class_call(thermodynamics_at_z(pba, pth, 1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ pth->inter_normal, &first_index_thermo, pvecback, pvecthermo), pth->error_message, ppt->error_message); class_test(pvecback[pba->index_bg_a]* pvecback[pba->index_bg_H]/ pvecthermo[pth->index_th_dkappa] < ppr->start_sources_at_tau_c_over_tau_h, ppt->error_message, "your choice of initial time for computing sources is inappropriate: it corresponds to a time after recombination. 
You should decrease 'start_sources_at_tau_c_over_tau_h'\n"); tau_mid = 0.5*(tau_lower + tau_upper); while (tau_upper - tau_lower > ppr->tol_tau_approx) { class_call(background_at_tau(pba, tau_mid, pba->short_info, pba->inter_normal, &first_index_back, pvecback), pba->error_message, ppt->error_message); class_call(thermodynamics_at_z(pba, pth, 1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ pth->inter_normal, &first_index_thermo, pvecback, pvecthermo), pth->error_message, ppt->error_message); if (pvecback[pba->index_bg_a]* pvecback[pba->index_bg_H]/ pvecthermo[pth->index_th_dkappa] > ppr->start_sources_at_tau_c_over_tau_h) tau_upper = tau_mid; else tau_lower = tau_mid; tau_mid = 0.5*(tau_lower + tau_upper); } tau_ini = tau_mid; } else { /* check the time corresponding to the highest redshift requested in output plus one */ class_call(background_tau_of_z(pba, ppt->z_max_pk+1, &tau_ini), pba->error_message, ppt->error_message); /* obsolete: previous choice was to start always at recombination time */ /* tau_ini = pth->tau_rec; */ /* set values of first_index_back/thermo */ class_call(background_at_tau(pba, tau_ini, pba->short_info, pba->inter_normal, &first_index_back, pvecback), pba->error_message, ppt->error_message); class_call(thermodynamics_at_z(pba, pth, 1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ pth->inter_normal, &first_index_thermo, pvecback, pvecthermo), pth->error_message, ppt->error_message); } /** - (b) next sampling point = previous + ppr->perturb_sampling_stepsize * timescale_source, where: - --> if CMB requested: timescale_source1 = \f$ |g/\dot{g}| = |\dot{\kappa}-\ddot{\kappa}/\dot{\kappa}|^{-1} \f$; timescale_source2 = \f$ |2\ddot{a}/a-(\dot{a}/a)^2|^{-1/2} \f$ (to sample correctly the late ISW effect; and timescale_source=1/(1/timescale_source1+1/timescale_source2); repeat till today. - --> if CMB not requested: timescale_source = 1/aH; repeat till today. */ counter = 1; last_index_back = first_index_back; last_index_thermo = first_index_thermo; tau = tau_ini; while (tau < pba->conformal_age) { class_call(background_at_tau(pba, tau, pba->short_info, pba->inter_closeby, &last_index_back, pvecback), pba->error_message, ppt->error_message); class_call(thermodynamics_at_z(pba, pth, 1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ pth->inter_closeby, &last_index_thermo, pvecback, pvecthermo), pth->error_message, ppt->error_message); if (ppt->has_cmb == _TRUE_) { /* variation rate of thermodynamics variables */ rate_thermo = pvecthermo[pth->index_th_rate]; /* variation rate of metric due to late ISW effect (important at late times) */ a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a]; a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a] + 2. 
* a_prime_over_a * a_prime_over_a; rate_isw_squared = fabs(2.*a_primeprime_over_a-a_prime_over_a*a_prime_over_a); /* compute rate */ timescale_source = sqrt(rate_thermo*rate_thermo+rate_isw_squared); } else { /* variation rate given by Hubble time */ a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a]; timescale_source = a_prime_over_a; } /* check it is non-zero */ class_test(timescale_source == 0., ppt->error_message, "null evolution rate, integration is diverging"); /* compute inverse rate */ timescale_source = 1./timescale_source; class_test(fabs(ppr->perturb_sampling_stepsize*timescale_source/tau) < ppr->smallest_allowed_variation, ppt->error_message, "integration step =%e < machine precision : leads either to numerical error or infinite loop",ppr->perturb_sampling_stepsize*timescale_source); tau = tau + ppr->perturb_sampling_stepsize*timescale_source; counter++; } /** - --> infer total number of time steps, ppt->tau_size */ ppt->tau_size = counter; /** - --> allocate array of time steps, ppt->tau_sampling[index_tau] */ class_alloc(ppt->tau_sampling,ppt->tau_size * sizeof(double),ppt->error_message); /** - --> repeat the same steps, now filling the array with each tau value: */ /** - --> (b.1.) first sampling point = when the universe stops being opaque */ counter = 0; ppt->tau_sampling[counter]=tau_ini; /** - --> (b.2.) next sampling point = previous + ppr->perturb_sampling_stepsize * timescale_source, where timescale_source1 = \f$ |g/\dot{g}| = |\dot{\kappa}-\ddot{\kappa}/\dot{\kappa}|^{-1} \f$; timescale_source2 = \f$ |2\ddot{a}/a-(\dot{a}/a)^2|^{-1/2} \f$ (to sample correctly the late ISW effect; and timescale_source=1/(1/timescale_source1+1/timescale_source2); repeat till today. If CMB not requested: timescale_source = 1/aH; repeat till today. */ last_index_back = first_index_back; last_index_thermo = first_index_thermo; tau = tau_ini; while (tau < pba->conformal_age) { class_call(background_at_tau(pba, tau, pba->short_info, pba->inter_closeby, &last_index_back, pvecback), pba->error_message, ppt->error_message); class_call(thermodynamics_at_z(pba, pth, 1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ pth->inter_closeby, &last_index_thermo, pvecback, pvecthermo), pth->error_message, ppt->error_message); if (ppt->has_cmb == _TRUE_) { /* variation rate of thermodynamics variables */ rate_thermo = pvecthermo[pth->index_th_rate]; /* variation rate of metric due to late ISW effect (important at late times) */ a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a]; a_primeprime_over_a = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a] + 2. 
* a_prime_over_a * a_prime_over_a; rate_isw_squared = fabs(2.*a_primeprime_over_a-a_prime_over_a*a_prime_over_a); /* compute rate */ timescale_source = sqrt(rate_thermo*rate_thermo+rate_isw_squared); } else { a_prime_over_a = pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a]; timescale_source = a_prime_over_a; } /* check it is non-zero */ class_test(timescale_source == 0., ppt->error_message, "null evolution rate, integration is diverging"); /* compute inverse rate */ timescale_source = 1./timescale_source; class_test(fabs(ppr->perturb_sampling_stepsize*timescale_source/tau) < ppr->smallest_allowed_variation, ppt->error_message, "integration step =%e < machine precision : leads either to numerical error or infinite loop",ppr->perturb_sampling_stepsize*timescale_source); tau = tau + ppr->perturb_sampling_stepsize*timescale_source; counter++; ppt->tau_sampling[counter]=tau; } /** - last sampling point = exactly today */ ppt->tau_sampling[counter] = pba->conformal_age; free(pvecback); free(pvecthermo); /** - loop over modes, initial conditions and types. For each of them, allocate array of source functions. */ for (index_md = 0; index_md < ppt->md_size; index_md++) { for (index_ic = 0; index_ic < ppt->ic_size[index_md]; index_ic++) { for (index_type = 0; index_type < ppt->tp_size[index_md]; index_type++) { class_alloc(ppt->sources[index_md][index_ic*ppt->tp_size[index_md]+index_type], ppt->k_size[index_md] * ppt->tau_size * sizeof(double), ppt->error_message); } } } return _SUCCESS_; } /** * Define the number of comoving wavenumbers using the information * passed in the precision structure. * * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to thermodynamics structure * @param ppt Input: pointer to perturbation structure * @return the error status */ int perturb_get_k_list( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt ) { int index_k, index_k_output, index_mode; double k,k_min=0.,k_rec,step,tau1; double * k_max_cmb; double * k_max_cl; double k_max=0.; double scale2; double *tmp_k_list; int newk_size, index_newk, add_k_output_value; /** Summary: */ class_test(ppr->k_step_transition == 0., ppt->error_message, "stop to avoid division by zero"); class_test(pth->rs_rec == 0., ppt->error_message, "stop to avoid division by zero"); /** - allocate arrays related to k list for each mode */ class_alloc(ppt->k_size_cmb, ppt->md_size*sizeof(int), ppt->error_message); class_alloc(ppt->k_size_cl, ppt->md_size*sizeof(int), ppt->error_message); class_alloc(ppt->k_size, ppt->md_size*sizeof(int), ppt->error_message); class_alloc(ppt->k, ppt->md_size*sizeof(double*), ppt->error_message); class_calloc(k_max_cmb, ppt->md_size, sizeof(double), ppt->error_message); class_calloc(k_max_cl, ppt->md_size, sizeof(double), ppt->error_message); /** - scalar modes */ if (ppt->has_scalars == _TRUE_) { /* first value */ if (pba->sgnK == 0) { /* K<0 (flat) : start close to zero */ k_min=ppr->k_min_tau0/pba->conformal_age; } else if (pba->sgnK == -1) { /* K<0 (open) : start close to sqrt(-K) (in transfer modules, for scalars, this will correspond to q close to zero; for vectors and tensors, this value is even smaller than the minimum necessary value) */ k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2)); } else if (pba->sgnK == 1) { /* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. 
k=sqrt((8-m)K) */ k_min = sqrt((8.-1.e-4)*pba->K); } /** - --> find k_max (as well as k_max_cmb[ppt->index_md_scalars], k_max_cl[ppt->index_md_scalars]) */ k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */ k_max_cmb[ppt->index_md_scalars] = k_min; k_max_cl[ppt->index_md_scalars] = k_min; k_max = k_min; if (ppt->has_cls == _TRUE_) { /* find k_max_cmb[ppt->index_md_scalars] : */ /* choose a k_max_cmb[ppt->index_md_scalars] corresponding to a wavelength on the last scattering surface seen today under an angle smaller than pi/lmax: this is equivalent to k_max_cl[ppt->index_md_scalars]*[comvoving.ang.diameter.distance] > l_max */ k_max_cmb[ppt->index_md_scalars] = ppr->k_max_tau0_over_l_max*ppt->l_scalar_max /pba->conformal_age/pth->angular_rescaling; k_max_cl[ppt->index_md_scalars] = k_max_cmb[ppt->index_md_scalars]; k_max = k_max_cmb[ppt->index_md_scalars]; /* find k_max_cl[ppt->index_md_scalars] : */ /* if we need density/lensing Cl's, we must impose a stronger condition, such that the minimum wavelength on the shell corresponding to the center of smallest redshift bin is seen under an angle smaller than pi/lmax. So we must multiply our previous k_max_cl[ppt->index_md_scalars] by the ratio tau0/(tau0-tau[center of smallest redshift bin]). Note that we could do the same with the lensing potential if we needed a very precise C_l^phi-phi at large l. We don't do it by default, because the lensed ClT, ClE would be marginally affected. */ if ((ppt->has_cl_number_count == _TRUE_) || (ppt->has_cl_lensing_potential == _TRUE_)) { class_call(background_tau_of_z(pba, ppt->selection_mean[0], &tau1), pba->error_message, ppt->error_message); k_max_cl[ppt->index_md_scalars] = MAX(k_max_cl[ppt->index_md_scalars],ppr->k_max_tau0_over_l_max*ppt->l_lss_max/(pba->conformal_age-tau1)); // to be very accurate we should use angular diameter distance to given redshift instead of comoving radius: would implement corrections depending on curvature k_max = k_max_cl[ppt->index_md_scalars]; } } /* find k_max: */ if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_)) k_max = MAX(k_max,ppt->k_max_for_pk); if (ppt->has_nl_corrections_based_on_delta_m == _TRUE_) k_max = MAX(k_max,ppr->halofit_min_k_max); /** - --> test that result for k_min, k_max make sense */ class_test(k_min<0., ppt->error_message, "buggy definition of k_min"); class_test(k_max<0., ppt->error_message, "buggy definition of k_max"); class_test(k_max<k_min, ppt->error_message, "buggy definition of k_min and/or k_max"); /* if K>0, the transfer function will be calculated for discrete integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and m=0,1,2 for scalars/vectors/tensors. However we are free to define in the perturbation module some arbitrary values of k: later on, the transfer module will interpolate at values of k corresponding exactly to integer values of nu. 
Hence, apart from the value of k_min and the step size in the vicinity of k_min, we define exactly the same sampling in the three cases K=0, K<0, K>0 */ /* allocate array with, for the moment, the largest possible size */ class_alloc(ppt->k[ppt->index_md_scalars], ((int)((k_max_cmb[ppt->index_md_scalars]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+ (int)(MAX(ppr->k_per_decade_for_pk,ppr->k_per_decade_for_bao)*log(k_max/k_min)/log(10.))+3) *sizeof(double),ppt->error_message); /* first value */ index_k=0; k = k_min; ppt->k[ppt->index_md_scalars][index_k] = k; index_k++; /* values until k_max_cmb[ppt->index_md_scalars] */ while (k < k_max_cmb[ppt->index_md_scalars]) { /* the linear step is not constant, it has a step-like shape, centered around the characteristic scale set by the sound horizon at recombination (associated to the comoving wavenumber k_rec) */ step = (ppr->k_step_super + 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.) * (ppr->k_step_sub-ppr->k_step_super)) * k_rec; /* there is one other thing to take into account in the step size. There are two other characteristic scales that matter for the sampling: the Hubble scale today, k0=a0H0, and eventually curvature scale sqrt(|K|). We define "scale2" as the sum of the squared Hubble radius and squared curvature radius. We need to increase the sampling for k<sqrt(scale2), in order to get the first mutipoles accurate enough. The formula below reduces it gradually in the k-->0 limit, by up to a factor 10. The actual stepsize is still fixed by k_step_super, this is just a reduction factor. */ scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K); step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction); class_test(step / k < ppr->smallest_allowed_variation, ppt->error_message, "k step =%e < machine precision : leads either to numerical error or infinite loop", step * k_rec); k += step; class_test(k <= ppt->k[ppt->index_md_scalars][index_k-1], ppt->error_message, "consecutive values of k should differ and should be in growing order"); ppt->k[ppt->index_md_scalars][index_k] = k; index_k++; } ppt->k_size_cmb[ppt->index_md_scalars] = index_k; /* values until k_max_cl[ppt->index_md_scalars] */ while (k < k_max_cl[ppt->index_md_scalars]) { k *= pow(10.,1./(ppr->k_per_decade_for_pk +(ppr->k_per_decade_for_bao-ppr->k_per_decade_for_pk) *(1.-tanh(pow((log(k)-log(ppr->k_bao_center*k_rec))/log(ppr->k_bao_width),4))))); ppt->k[ppt->index_md_scalars][index_k] = k; index_k++; } ppt->k_size_cl[ppt->index_md_scalars] = index_k; /* values until k_max */ while (k < k_max) { k *= pow(10.,1./(ppr->k_per_decade_for_pk +(ppr->k_per_decade_for_bao-ppr->k_per_decade_for_pk) *(1.-tanh(pow((log(k)-log(ppr->k_bao_center*k_rec))/log(ppr->k_bao_width),4))))); ppt->k[ppt->index_md_scalars][index_k] = k; index_k++; } ppt->k_size[ppt->index_md_scalars] = index_k; class_realloc(ppt->k[ppt->index_md_scalars], ppt->k[ppt->index_md_scalars], ppt->k_size[ppt->index_md_scalars]*sizeof(double), ppt->error_message); } /** - vector modes */ if (ppt->has_vectors == _TRUE_) { /* first value */ if (pba->sgnK == 0) { /* K<0 (flat) : start close to zero */ k_min=ppr->k_min_tau0/pba->conformal_age; } else if (pba->sgnK == -1) { /* K<0 (open) : start close to sqrt(-K) (in transfer modules, for scalars, this will correspond to q close to zero; for vectors and tensors, this value is even smaller than the minimum necessary value) */ k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2)); } else if (pba->sgnK == 1) { /* K>0 (closed): 
start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */ k_min = sqrt((7.-1.e-4)*pba->K); } /** - --> find k_max (as well as k_max_cmb[ppt->index_md_vectors], k_max_cl[ppt->index_md_vectors]) */ k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */ k_max_cmb[ppt->index_md_vectors] = k_min; k_max_cl[ppt->index_md_vectors] = k_min; k_max = k_min; if (ppt->has_cls == _TRUE_) { /* find k_max_cmb: */ /* choose a k_max_cmb corresponding to a wavelength on the last scattering surface seen today under an angle smaller than pi/lmax: this is equivalent to k_max_cl*[comvoving.ang.diameter.distance] > l_max */ k_max_cmb[ppt->index_md_vectors] = ppr->k_max_tau0_over_l_max*ppt->l_vector_max /pba->conformal_age/pth->angular_rescaling; k_max_cl[ppt->index_md_vectors] = k_max_cmb[ppt->index_md_vectors]; k_max = k_max_cmb[ppt->index_md_vectors]; } /** - --> test that result for k_min, k_max make sense */ class_test(k_min<0., ppt->error_message, "buggy definition of k_min"); class_test(k_max<0., ppt->error_message, "buggy definition of k_max"); class_test(k_max<k_min, ppt->error_message, "buggy definition of k_min and/or k_max"); /* if K>0, the transfer function will be calculated for discrete integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and m=0,1,2 for scalars/vectors/tensors. However we are free to define in the perturbation module some arbitrary values of k: later on, the transfer module will interpolate at values of k corresponding exactly to integer values of nu. Hence, apart from the value of k_min and the step size in the vicinity of k_min, we define exactly the same sampling in the three cases K=0, K<0, K>0 */ /* allocate array with, for the moment, the largest possible size */ class_alloc(ppt->k[ppt->index_md_vectors], ((int)((k_max_cmb[ppt->index_md_vectors]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+1) *sizeof(double),ppt->error_message); /* first value */ index_k=0; k = k_min; ppt->k[ppt->index_md_vectors][index_k] = k; index_k++; /* values until k_max_cmb[ppt->index_md_vectors] */ while (k < k_max_cmb[ppt->index_md_vectors]) { /* the linear step is not constant, it has a step-like shape, centered around the characteristic scale set by the sound horizon at recombination (associated to the comoving wavenumber k_rec) */ step = (ppr->k_step_super + 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.) * (ppr->k_step_sub-ppr->k_step_super)) * k_rec; /* there is one other thing to take into account in the step size. There are two other characteristic scales that matter for the sampling: the Hubble scale today, k0=a0H0, and eventually curvature scale sqrt(|K|). We define "scale2" as the sum of the squared Hubble radius and squared curvature radius. We need to increase the sampling for k<sqrt(scale2), in order to get the first mutipoles accurate enough. The formula below reduces it gradually in the k-->0 limit, by up to a factor 10. The actual stepsize is still fixed by k_step_super, this is just a reduction factor. 
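     As a rough guide (a summary, not an exact statement): well below k_rec
     the base step is about k_step_super*k_rec and this reduction factor tends
     to k_step_super_reduction, while for k*k much larger than scale2 and k
     well above k_rec the factor tends to one and the step approaches
     k_step_sub*k_rec.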
*/ scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K); step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction); class_test(step / k < ppr->smallest_allowed_variation, ppt->error_message, "k step =%e < machine precision : leads either to numerical error or infinite loop", step * k_rec); k += step; class_test(k <= ppt->k[ppt->index_md_vectors][index_k-1], ppt->error_message, "consecutive values of k should differ and should be in growing order"); ppt->k[ppt->index_md_vectors][index_k] = k; index_k++; } ppt->k_size_cmb[ppt->index_md_vectors] = index_k; ppt->k_size_cl[ppt->index_md_vectors] = index_k; ppt->k_size[ppt->index_md_vectors] = index_k; class_realloc(ppt->k[ppt->index_md_vectors], ppt->k[ppt->index_md_vectors], ppt->k_size[ppt->index_md_vectors]*sizeof(double), ppt->error_message); } /** - tensor modes */ if (ppt->has_tensors == _TRUE_) { /* first value */ if (pba->sgnK == 0) { /* K=0 (flat) : start close to zero */ k_min=ppr->k_min_tau0/pba->conformal_age; } else if (pba->sgnK == -1) { /* K<0 (open) : start close to sqrt(-K) (in transfer modules, for scalars, this will correspond to q close to zero; for vectors and tensors, this value is even smaller than the minimum necessary value) */ k_min=sqrt(-pba->K+pow(ppr->k_min_tau0/pba->conformal_age/pth->angular_rescaling,2)); } else if (pba->sgnK == 1) { /* K>0 (closed): start from q=sqrt(k2+(1+m)K) equal to 3sqrt(K), i.e. k=sqrt((8-m)K) */ k_min = sqrt((6.-1.e-4)*pba->K); } /** - --> find k_max (as well as k_max_cmb[ppt->index_md_tensors], k_max_cl[ppt->index_md_tensors]) */ k_rec = 2. * _PI_ / pth->rs_rec; /* comoving scale corresponding to sound horizon at recombination */ k_max_cmb[ppt->index_md_tensors] = k_min; k_max_cl[ppt->index_md_tensors] = k_min; k_max = k_min; if (ppt->has_cls == _TRUE_) { /* find k_max_cmb[ppt->index_md_tensors]: */ /* choose a k_max_cmb[ppt->index_md_tensors] corresponding to a wavelength on the last scattering surface seen today under an angle smaller than pi/lmax: this is equivalent to k_max_cl[ppt->index_md_tensors]*[comoving.ang.diameter.distance] > l_max */ k_max_cmb[ppt->index_md_tensors] = ppr->k_max_tau0_over_l_max*ppt->l_tensor_max /pba->conformal_age/pth->angular_rescaling; k_max_cl[ppt->index_md_tensors] = k_max_cmb[ppt->index_md_tensors]; k_max = k_max_cmb[ppt->index_md_tensors]; } /** - --> test that result for k_min, k_max make sense */ class_test(k_min<0., ppt->error_message, "buggy definition of k_min"); class_test(k_max<0., ppt->error_message, "buggy definition of k_max"); class_test(k_max<k_min, ppt->error_message, "buggy definition of k_min and/or k_max"); /* if K>0, the transfer function will be calculated for discrete integer values of nu=3,4,5,... where nu=sqrt(k2+(1+m)K) and m=0,1,2 for scalars/vectors/tensors. However we are free to define in the perturbation module some arbitrary values of k: later on, the transfer module will interpolate at values of k corresponding exactly to integer values of nu.
Hence, apart from the value of k_min and the step size in the vicinity of k_min, we define exactly the same sampling in the three cases K=0, K<0, K>0 */ /* allocate array with, for the moment, the largest possible size */ class_alloc(ppt->k[ppt->index_md_tensors], ((int)((k_max_cmb[ppt->index_md_tensors]-k_min)/k_rec/MIN(ppr->k_step_super,ppr->k_step_sub))+1) *sizeof(double),ppt->error_message); /* first value */ index_k=0; k = k_min; ppt->k[ppt->index_md_tensors][index_k] = k; index_k++; /* values until k_max_cmb[ppt->index_md_tensors] */ while (k < k_max_cmb[ppt->index_md_tensors]) { /* the linear step is not constant, it has a step-like shape, centered around the characteristic scale set by the sound horizon at recombination (associated to the comoving wavenumber k_rec) */ step = (ppr->k_step_super + 0.5 * (tanh((k-k_rec)/k_rec/ppr->k_step_transition)+1.) * (ppr->k_step_sub-ppr->k_step_super)) * k_rec; /* there is one other thing to take into account in the step size. There are two other characteristic scales that matter for the sampling: the Hubble scale today, k0=a0H0, and eventually curvature scale sqrt(|K|). We define "scale2" as the sum of the squared Hubble radius and squared curvature radius. We need to increase the sampling for k<sqrt(scale2), in order to get the first mutipoles accurate enough. The formula below reduces it gradually in the k-->0 limit, by up to a factor 10. The actual stepsize is still fixed by k_step_super, this is just a reduction factor. */ scale2 = pow(pba->a_today*pba->H0,2)+fabs(pba->K); step *= (k*k/scale2+1.)/(k*k/scale2+1./ppr->k_step_super_reduction); class_test(step / k < ppr->smallest_allowed_variation, ppt->error_message, "k step =%e < machine precision : leads either to numerical error or infinite loop", step * k_rec); k += step; class_test(k <= ppt->k[ppt->index_md_tensors][index_k-1], ppt->error_message, "consecutive values of k should differ and should be in growing order"); ppt->k[ppt->index_md_tensors][index_k] = k; index_k++; } ppt->k_size_cmb[ppt->index_md_tensors] = index_k; ppt->k_size_cl[ppt->index_md_tensors] = index_k; ppt->k_size[ppt->index_md_tensors] = index_k; class_realloc(ppt->k[ppt->index_md_tensors], ppt->k[ppt->index_md_tensors], ppt->k_size[ppt->index_md_tensors]*sizeof(double), ppt->error_message); } /** - If user asked for k_output_values, add those to all k lists: */ if (ppt->k_output_values_num>0){ /* Allocate storage */ class_alloc(ppt->index_k_output_values,sizeof(double)*ppt->md_size*ppt->k_output_values_num,ppt->error_message); /** - --> Find indices in ppt->k[index_md] corresponding to 'k_output_values'. We are assuming that ppt->k is sorted and growing, and we have made sure that ppt->k_output_values is also sorted and growing.*/ for (index_mode=0; index_mode<ppt->md_size; index_mode++){ newk_size = ppt->k_size[index_mode]+ppt->k_output_values_num; class_alloc(tmp_k_list,sizeof(double)*newk_size,ppt->error_message); index_k=0; index_k_output=0; for (index_newk=0; index_newk<newk_size; index_newk++){ /** - --> Decide if we should add k_output_value now. 
This has to be this complicated, since we can only compare the k-values when both indices are in range.*/ if (index_k >= ppt->k_size[index_mode]) add_k_output_value = _TRUE_; else if (index_k_output >= ppt->k_output_values_num) add_k_output_value = _FALSE_; else if (ppt->k_output_values[index_k_output] < ppt->k[index_mode][index_k]) add_k_output_value = _TRUE_; else add_k_output_value = _FALSE_; if (add_k_output_value == _TRUE_){ tmp_k_list[index_newk] = ppt->k_output_values[index_k_output]; ppt->index_k_output_values[index_mode*ppt->k_output_values_num+index_k_output]=index_newk; index_k_output++; } else{ tmp_k_list[index_newk] = ppt->k[index_mode][index_k]; index_k++; } } free(ppt->k[index_mode]); ppt->k[index_mode] = tmp_k_list; ppt->k_size[index_mode] = newk_size; index_k = newk_size-1; while (ppt->k[index_mode][index_k] > k_max_cl[index_mode]) index_k--; ppt->k_size_cl[index_mode] = MIN(index_k+2,ppt->k_size[index_mode]); index_k = newk_size-1; while (ppt->k[index_mode][index_k] > k_max_cmb[index_mode]) index_k--; ppt->k_size_cmb[index_mode] = MIN(index_k+2,ppt->k_size[index_mode]); /** - --> The two MIN statements are here because in a normal run, the cl and cmb arrays contain a single k value larger than their respective k_max. We are mimicking this behavior. */ } } /* For testing, can be useful to print the k list in a file: FILE * out=fopen("output/k","w"); for (index_k=0; index_k < ppt->k_size[0]; index_k++) { fprintf(out,"%e\n",ppt->k[0][index_k],pba->K); } fclose(out); */ /** - finally, find the global k_min and k_max for the ensemble of all modes 9scalars, vectors, tensors) */ ppt->k_min = _HUGE_; ppt->k_max = 0.; if (ppt->has_scalars == _TRUE_) { ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_scalars][0]); /* first value, inferred from perturbations structure */ ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_scalars][ppt->k_size[ppt->index_md_scalars]-1]); /* last value, inferred from perturbations structure */ } if (ppt->has_vectors == _TRUE_) { ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_vectors][0]); /* first value, inferred from perturbations structure */ ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_vectors][ppt->k_size[ppt->index_md_vectors]-1]); /* last value, inferred from perturbations structure */ } if (ppt->has_tensors == _TRUE_) { ppt->k_min = MIN(ppt->k_min,ppt->k[ppt->index_md_tensors][0]); /* first value, inferred from perturbations structure */ ppt->k_max = MAX(ppt->k_max,ppt->k[ppt->index_md_tensors][ppt->k_size[ppt->index_md_tensors]-1]); /* last value, inferred from perturbations structure */ } free(k_max_cmb); free(k_max_cl); return _SUCCESS_; } /** * Initialize a perturb_workspace structure. All fields are allocated * here, with the exception of the perturb_vector '-->pv' field, which * is allocated separately in perturb_vector_init. We allocate one * such perturb_workspace structure per thread and per mode * (scalar/../tensor). Then, for each thread, all initial conditions * and wavenumbers will use the same workspace. 
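 *
 * In practice the workspace gathers: the indices of metric perturbations
 * obeying constraint equations (ppw->index_mt_...), the free-streaming
 * coefficients ppw->s_l, the temporary vectors of background, thermodynamics
 * and metric quantities (ppw->pvecback, ppw->pvecthermo, ppw->pvecmetric),
 * and the array of approximation flags ppw->approx.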
* * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to the thermodynamics structure * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param ppw Input/Output: pointer to perturb_workspace structure which fields are allocated or filled here * @return the error status */ int perturb_workspace_init( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt, int index_md, struct perturb_workspace * ppw ) { /** Summary: */ /** - define local variables */ int index_mt=0; int index_ap; int l; /** - Compute maximum l_max for any multipole */; if (_scalars_) { ppw->max_l_max = MAX(ppr->l_max_g, ppr->l_max_pol_g); if (pba->has_ur == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ur); if (pba->has_ncdm == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ncdm); if (pba->has_dr == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_dr); } if (_tensors_) { ppw->max_l_max = MAX(ppr->l_max_g_ten, ppr->l_max_pol_g_ten); if (pba->has_ur == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ur); if (pba->has_ncdm == _TRUE_) ppw->max_l_max = MAX(ppw->max_l_max, ppr->l_max_ncdm); } /** - Allocate \f$ s_l\f$[ ] array for freestreaming of multipoles (see arXiv:1305.3261) and initialize to 1.0, which is the K=0 value. */ class_alloc(ppw->s_l, sizeof(double)*(ppw->max_l_max+1),ppt->error_message); for (l=0; l<=ppw->max_l_max; l++){ ppw->s_l[l] = 1.0; } /** - define indices of metric perturbations obeying constraint equations (this can be done once and for all, because the vector of metric perturbations is the same whatever the approximation scheme, unlike the vector of quantities to be integrated, which is allocated separately in perturb_vector_init) */ if (_scalars_) { /* newtonian gauge */ if (ppt->gauge == newtonian) { class_define_index(ppw->index_mt_psi,_TRUE_,index_mt,1); /* psi */ class_define_index(ppw->index_mt_phi_prime,_TRUE_,index_mt,1); /* phi' */ } /* synchronous gauge (note that eta is counted in the vector of quantities to be integrated, while here we only consider quantities obeying to constraint equations) */ if (ppt->gauge == synchronous) { class_define_index(ppw->index_mt_h_prime,_TRUE_,index_mt,1); /* h' */ class_define_index(ppw->index_mt_h_prime_prime,_TRUE_,index_mt,1); /* h'' */ class_define_index(ppw->index_mt_eta_prime,_TRUE_,index_mt,1); /* eta' */ class_define_index(ppw->index_mt_alpha,_TRUE_,index_mt,1); /* alpha = (h' + 6 tau') / (2 k**2) */ class_define_index(ppw->index_mt_alpha_prime,_TRUE_,index_mt,1); /* alpha' */ } } if (_vectors_) { /* newtonian gauge */ if (ppt->gauge == newtonian) { class_define_index(ppw->index_mt_V_prime,_TRUE_,index_mt,1); } if (ppt->gauge == synchronous) { class_define_index(ppw->index_mt_hv_prime_prime,_TRUE_,index_mt,1); } } if (_tensors_) { class_define_index(ppw->index_mt_gw_prime_prime,_TRUE_,index_mt,1); } ppw->mt_size = index_mt; /** - allocate some workspace in which we will store temporarily the values of background, thermodynamics, metric and source quantities at a given time */ class_alloc(ppw->pvecback,pba->bg_size_normal*sizeof(double),ppt->error_message); class_alloc(ppw->pvecthermo,pth->th_size*sizeof(double),ppt->error_message); class_alloc(ppw->pvecmetric,ppw->mt_size*sizeof(double),ppt->error_message); /** - count number of approximations, initialize their indices, and allocate their flags */ index_ap=0; 
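  /* Approximation schemes tracked in ppw->approx: tca = tight-coupling
     approximation, rsa = radiation streaming approximation, and, for scalar
     modes only, ufa = ultra-relativistic fluid approximation (if ur species
     are present) and ncdmfa = ncdm fluid approximation (if ncdm species are
     present). */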
class_define_index(ppw->index_ap_tca,_TRUE_,index_ap,1); class_define_index(ppw->index_ap_rsa,_TRUE_,index_ap,1); if (_scalars_) { class_define_index(ppw->index_ap_ufa,pba->has_ur,index_ap,1); class_define_index(ppw->index_ap_ncdmfa,pba->has_ncdm,index_ap,1); } ppw->ap_size=index_ap; if (ppw->ap_size > 0) class_alloc(ppw->approx,ppw->ap_size*sizeof(int),ppt->error_message); /** - For definiteness, initialize approximation flags to arbitrary values (correct values are overwritten in pertub_find_approximation_switches) */ if (_scalars_) { ppw->approx[ppw->index_ap_tca]=(int)tca_on; ppw->approx[ppw->index_ap_rsa]=(int)rsa_off; if (pba->has_ur == _TRUE_) { ppw->approx[ppw->index_ap_ufa]=(int)ufa_off; } if (pba->has_ncdm == _TRUE_) { ppw->approx[ppw->index_ap_ncdmfa]=(int)ncdmfa_off; } } if (_tensors_) { ppw->approx[ppw->index_ap_tca]=(int)tca_on; ppw->approx[ppw->index_ap_rsa]=(int)rsa_off; } /** - allocate fields where some of the perturbations are stored */ if (_scalars_) { if ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) { class_alloc(ppw->delta_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message); class_alloc(ppw->theta_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message); class_alloc(ppw->shear_ncdm,pba->N_ncdm*sizeof(double),ppt->error_message); } } return _SUCCESS_; } /** * Free the perturb_workspace structure (with the exception of the * perturb_vector '-->pv' field, which is freed separately in * perturb_vector_free). * * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param ppw Input: pointer to perturb_workspace structure to be freed * @return the error status */ int perturb_workspace_free ( struct perturbs * ppt, int index_md, struct perturb_workspace * ppw ) { free(ppw->s_l); free(ppw->pvecback); free(ppw->pvecthermo); free(ppw->pvecmetric); if (ppw->ap_size > 0) free(ppw->approx); if (_scalars_) { if ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) { free(ppw->delta_ncdm); free(ppw->theta_ncdm); free(ppw->shear_ncdm); } } free(ppw); return _SUCCESS_; } /** * Solve the perturbation evolution for a given mode, initial * condition and wavenumber, and compute the corresponding source * functions. * * For a given mode, initial condition and wavenumber, this function * finds the time ranges over which the perturbations can be described * within a given approximation. For each such range, it initializes * (or redistributes) perturbations using perturb_vector_init(), and * integrates over time. Whenever a "source sampling time" is passed, * the source terms are computed and stored in the source table using * perturb_sources(). * * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to the thermodynamics structure * @param ppt Input/Output: pointer to the perturbation structure (output source functions S(k,tau) written here) * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param index_ic Input: index of initial condition under consideration (ad, iso...) 
* @param index_k Input: index of wavenumber * @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces * @return the error status */ int perturb_solve( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt, int index_md, int index_ic, int index_k, struct perturb_workspace * ppw ) { /** Summary: */ /** - define local variables */ /* contains all fixed parameters, indices and workspaces used by the perturb_derivs function */ struct perturb_parameters_and_workspace ppaw; /* conformal time */ double tau,tau_lower,tau_upper,tau_mid; /* multipole */ int l; /* index running over time */ int index_tau; /* number of values in the tau_sampling array that should be considered for a given mode */ int tau_actual_size; /* running index over types (temperature, etc) */ int index_type; /* Fourier mode */ double k; /* number of time intervals where the approximation scheme is uniform */ int interval_number; /* index running over such time intervals */ int index_interval; /* number of time intervals where each particular approximation is uniform */ int * interval_number_of; /* edge of intervals where approximation scheme is uniform: tau_ini, tau_switch_1, ..., tau_end */ double * interval_limit; /* array of approximation scheme within each interval: interval_approx[index_interval][index_ap] */ int ** interval_approx; /* index running over approximations */ int index_ap; /* approximation scheme within previous interval: previous_approx[index_ap] */ int * previous_approx; int n_ncdm,is_early_enough; /* function pointer to ODE evolver and names of possible evolvers */ extern int evolver_rk(); extern int evolver_ndf15(); int (*generic_evolver)(); /* Related to the perturbation output */ int (*perhaps_print_variables)(); int index_ikout; /* Related to the scf */ double Omega_phi,theta_phi,y1_phi,m_scf_over_H; /** - initialize indices relevant for back/thermo tables search */ ppw->last_index_back=0; ppw->last_index_thermo=0; ppw->inter_mode = pba->inter_normal; /** - get wavenumber value */ k = ppt->k[index_md][index_k]; class_test(k == 0., ppt->error_message, "stop to avoid division by zero"); /** - If non-zero curvature, update array of free-streaming coefficients ppw->s_l */ if (pba->has_curvature == _TRUE_){ for (l = 0; l<=ppw->max_l_max; l++){ ppw->s_l[l] = sqrt(MAX(1.0-pba->K*(l*l-1.0)/k/k,0.)); } } /** - maximum value of tau for which sources are calculated for this wavenumber */ /* by default, today */ tau_actual_size = ppt->tau_size; /** - using bisection, compute minimum value of tau for which this wavenumber is integrated */ /* will be at least the first time in the background table */ tau_lower = pba->tau_table[0]; class_call(background_at_tau(pba, tau_lower, pba->normal_info, pba->inter_normal, &(ppw->last_index_back), ppw->pvecback), pba->error_message, ppt->error_message); class_call(thermodynamics_at_z(pba, pth, 1./ppw->pvecback[pba->index_bg_a]-1., pth->inter_normal, &(ppw->last_index_thermo), ppw->pvecback, ppw->pvecthermo), pth->error_message, ppt->error_message); /* check that this initial time is indeed OK given imposed conditions on kappa' and on k/aH */ class_test(ppw->pvecback[pba->index_bg_a]* ppw->pvecback[pba->index_bg_H]/ ppw->pvecthermo[pth->index_th_dkappa] > ppr->start_small_k_at_tau_c_over_tau_h, ppt->error_message, "your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time before that at which the background has been integrated. 
You should increase 'start_small_k_at_tau_c_over_tau_h' up to at least %g, or decrease 'a_ini_over_a_today_default'\n", ppw->pvecback[pba->index_bg_a]* ppw->pvecback[pba->index_bg_H]/ ppw->pvecthermo[pth->index_th_dkappa]); class_test(k/ppw->pvecback[pba->index_bg_a]/ppw->pvecback[pba->index_bg_H] > ppr->start_large_k_at_tau_h_over_tau_k, ppt->error_message, "your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time before that at which the background has been integrated. You should increase 'start_large_k_at_tau_h_over_tau_k' up to at least %g, or decrease 'a_ini_over_a_today_default'\n", ppt->k[index_md][ppt->k_size[index_md]-1]/ppw->pvecback[pba->index_bg_a]/ ppw->pvecback[pba->index_bg_H]); if (pba->has_ncdm == _TRUE_) { for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) { class_test(fabs(ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]-1./3.)>ppr->tol_ncdm_initial_w, ppt->error_message, "your choice of initial time for integrating wavenumbers is inappropriate: it corresponds to a time at which the ncdm species number %d is not ultra-relativistic anymore, with w=%g, p=%g and rho=%g\n", n_ncdm, ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm], ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm], ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]); } } /* is at most the time at which sources must be sampled */ tau_upper = ppt->tau_sampling[0]; /* start bisection */ tau_mid = 0.5*(tau_lower + tau_upper); while ((tau_upper - tau_lower)/tau_lower > ppr->tol_tau_approx) { is_early_enough = _TRUE_; class_call(background_at_tau(pba, tau_mid, pba->normal_info, pba->inter_normal, &(ppw->last_index_back), ppw->pvecback), pba->error_message, ppt->error_message); /* if there are non-cold relics, check that they are relativistic enough */ if (pba->has_ncdm == _TRUE_) { for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) { if (fabs(ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]-1./3.) 
> ppr->tol_ncdm_initial_w) is_early_enough = _FALSE_; } } /* also check that the two conditions on (aH/kappa') and (aH/k) are fulfilled */ if (is_early_enough == _TRUE_) { class_call(thermodynamics_at_z(pba, pth, 1./ppw->pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ pth->inter_normal, &(ppw->last_index_thermo), ppw->pvecback, ppw->pvecthermo), pth->error_message, ppt->error_message); if ((ppw->pvecback[pba->index_bg_a]* ppw->pvecback[pba->index_bg_H]/ ppw->pvecthermo[pth->index_th_dkappa] > ppr->start_small_k_at_tau_c_over_tau_h) || (k/ppw->pvecback[pba->index_bg_a]/ppw->pvecback[pba->index_bg_H] > ppr->start_large_k_at_tau_h_over_tau_k)) is_early_enough = _FALSE_; //if (pba->has_scf == _TRUE_) { /* also check that the scf is slowly-rolling */ //Omega_phi = ppw->pvecback[pba->index_bg_Omega_phi_scf]; //theta_phi = ppw->pvecback[pba->index_bg_theta_phi_scf]; //y1_phi = ppw->pvecback[pba->index_bg_y_phi_scf]; /** Following expression comes from a trigonometric-hyperbolic identity for mass_scf */ //m_scf_over_H = 0.5*pow(pow(y1_phi,2.)+2.*y2_phi_scf(pba,Omega_phi,theta_phi,y1_phi)*exp(0.5*Omega_phi)*cos_scf(pba,0.5*theta_phi),0.5); /*if (m_scf_over_H > 1.e-2) is_early_enough = _FALSE_; }*/ } if (is_early_enough == _TRUE_) tau_lower = tau_mid; else tau_upper = tau_mid; tau_mid = 0.5*(tau_lower + tau_upper); } tau = tau_mid; /** - find the number of intervals over which approximation scheme is constant */ class_alloc(interval_number_of,ppw->ap_size*sizeof(int),ppt->error_message); ppw->inter_mode = pba->inter_normal; class_call(perturb_find_approximation_number(ppr, pba, pth, ppt, index_md, k, ppw, tau, ppt->tau_sampling[tau_actual_size-1], &interval_number, interval_number_of), ppt->error_message, ppt->error_message); class_alloc(interval_limit,(interval_number+1)*sizeof(double),ppt->error_message); class_alloc(interval_approx,interval_number*sizeof(int*),ppt->error_message); for (index_interval=0; index_interval<interval_number; index_interval++) class_alloc(interval_approx[index_interval],ppw->ap_size*sizeof(int),ppt->error_message); class_call(perturb_find_approximation_switches(ppr, pba, pth, ppt, index_md, k, ppw, tau, ppt->tau_sampling[tau_actual_size-1], ppr->tol_tau_approx, interval_number, interval_number_of, interval_limit, interval_approx), ppt->error_message, ppt->error_message); free(interval_number_of); /** - fill the structure containing all fixed parameters, indices and workspaces needed by perturb_derivs */ ppaw.ppr = ppr; ppaw.pba = pba; ppaw.pth = pth; ppaw.ppt = ppt; ppaw.index_md = index_md; ppaw.index_ic = index_ic; ppaw.index_k = index_k; ppaw.k = k; ppaw.ppw = ppw; ppaw.ppw->inter_mode = pba->inter_closeby; ppaw.ppw->last_index_back = 0; ppaw.ppw->last_index_thermo = 0; /** - check whether we need to print perturbations to a file for this wavenumber */ perhaps_print_variables = NULL; ppw->index_ikout = -1; for (index_ikout=0; index_ikout<ppt->k_output_values_num; index_ikout++){ if (ppt->index_k_output_values[index_md*ppt->k_output_values_num+index_ikout] == index_k){ ppw->index_ikout = index_ikout; perhaps_print_variables = perturb_print_variables; /* class_call(perturb_prepare_output_file( pba,ppt,ppw,index_ikout,index_md), ppt->error_message, ppt->error_message); */ } } /** - loop over intervals over which approximation scheme is uniform. 
For each interval: */ for (index_interval=0; index_interval<interval_number; index_interval++) { /** - --> (a) fix the approximation scheme */ for (index_ap=0; index_ap<ppw->ap_size; index_ap++) ppw->approx[index_ap]=interval_approx[index_interval][index_ap]; /** - --> (b) get the previous approximation scheme. If the current interval starts from the initial time tau_ini, the previous approximation is set to be a NULL pointer, so that the function perturb_vector_init() knows that perturbations must be initialized */ if (index_interval==0) { previous_approx=NULL; } else { previous_approx=interval_approx[index_interval-1]; } /** - --> (c) define the vector of perturbations to be integrated over. If the current interval starts from the initial time tau_ini, fill the vector with initial conditions for each mode. If it starts from an approximation switching point, redistribute correctly the perturbations from the previous to the new vector of perturbations. */ class_call(perturb_vector_init(ppr, pba, pth, ppt, index_md, index_ic, k, interval_limit[index_interval], ppw, previous_approx), ppt->error_message, ppt->error_message); /** - --> (d) integrate the perturbations over the current interval. */ if(ppr->evolver == rk){ generic_evolver = evolver_rk; } else{ generic_evolver = evolver_ndf15; } class_call(generic_evolver(perturb_derivs, interval_limit[index_interval], interval_limit[index_interval+1], ppw->pv->y, ppw->pv->used_in_sources, ppw->pv->pt_size, &ppaw, ppr->tol_perturb_integration, ppr->smallest_allowed_variation, perturb_timescale, ppr->perturb_integration_stepsize, ppt->tau_sampling, tau_actual_size, perturb_sources, perhaps_print_variables, ppt->error_message), ppt->error_message, ppt->error_message); } /** - if perturbations were printed in a file, close the file */ //if (perhaps_print_variables != NULL) // fclose(ppw->perturb_output_file); /** - fill the source terms array with zeros for all times between the last integrated time tau_max and tau_today. */ for (index_tau = tau_actual_size; index_tau < ppt->tau_size; index_tau++) { for (index_type = 0; index_type < ppt->tp_size[index_md]; index_type++) { ppt->sources[index_md] [index_ic * ppt->tp_size[index_md] + index_type] [index_tau * ppt->k_size[index_md] + index_k] = 0.; } } /** - free quantities allocated at the beginning of the routine */ class_call(perturb_vector_free(ppw->pv), ppt->error_message, ppt->error_message); for (index_interval=0; index_interval<interval_number; index_interval++) free(interval_approx[index_interval]); free(interval_approx); free(interval_limit); return _SUCCESS_; } int perturb_prepare_output(struct background * pba, struct perturbs * ppt){ int n_ncdm; char tmp[40]; ppt->scalar_titles[0]='\0'; ppt->vector_titles[0]='\0'; ppt->tensor_titles[0]='\0'; if (ppt->k_output_values_num > 0) { /** Write titles for all perturbations that we would like to print/store. 
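      Each class_store_columntitle() call below appends its title to the
      relevant title string only when the flag passed as last argument is
      _TRUE_, so the list of output columns matches the species and options
      that are actually active in this run.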
*/ if (ppt->has_scalars == _TRUE_){ class_store_columntitle(ppt->scalar_titles,"tau [Mpc]",_TRUE_); class_store_columntitle(ppt->scalar_titles,"a",_TRUE_); class_store_columntitle(ppt->scalar_titles,"delta_g",_TRUE_); class_store_columntitle(ppt->scalar_titles,"theta_g",_TRUE_); class_store_columntitle(ppt->scalar_titles,"shear_g",_TRUE_); class_store_columntitle(ppt->scalar_titles,"pol0_g",_TRUE_); class_store_columntitle(ppt->scalar_titles,"pol1_g",_TRUE_); class_store_columntitle(ppt->scalar_titles,"pol2_g",_TRUE_); class_store_columntitle(ppt->scalar_titles,"delta_b",_TRUE_); class_store_columntitle(ppt->scalar_titles,"theta_b",_TRUE_); class_store_columntitle(ppt->scalar_titles,"psi",_TRUE_); class_store_columntitle(ppt->scalar_titles,"phi",_TRUE_); /* Perturbed recombination */ class_store_columntitle(ppt->scalar_titles,"delta_Tb",ppt->has_perturbed_recombination); class_store_columntitle(ppt->scalar_titles,"delta_chi",ppt->has_perturbed_recombination); /* Ultrarelativistic species */ class_store_columntitle(ppt->scalar_titles,"delta_ur",pba->has_ur); class_store_columntitle(ppt->scalar_titles,"theta_ur",pba->has_ur); class_store_columntitle(ppt->scalar_titles,"shear_ur",pba->has_ur); /* Cold dark matter */ class_store_columntitle(ppt->scalar_titles,"delta_cdm",pba->has_cdm); class_store_columntitle(ppt->scalar_titles,"theta_cdm",pba->has_cdm); /* Non-cold dark matter */ if ((pba->has_ncdm == _TRUE_) && ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_))) { for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ sprintf(tmp,"delta_ncdm[%d]",n_ncdm); class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_); sprintf(tmp,"theta_ncdm[%d]",n_ncdm); class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_); sprintf(tmp,"shear_ncdm[%d]",n_ncdm); class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_); sprintf(tmp,"cs2_ncdm[%d]",n_ncdm); class_store_columntitle(ppt->scalar_titles,tmp,_TRUE_); } } /* Decaying cold dark matter */ class_store_columntitle(ppt->scalar_titles, "delta_dcdm", pba->has_dcdm); class_store_columntitle(ppt->scalar_titles, "theta_dcdm", pba->has_dcdm); /* Decay radiation */ class_store_columntitle(ppt->scalar_titles, "delta_dr", pba->has_dr); class_store_columntitle(ppt->scalar_titles, "theta_dr", pba->has_dr); class_store_columntitle(ppt->scalar_titles, "shear_dr", pba->has_dr); /* Scalar field scf */ class_store_columntitle(ppt->scalar_titles, "delta_scf", pba->has_scf); class_store_columntitle(ppt->scalar_titles, "theta_scf", pba->has_scf); class_store_columntitle(ppt->scalar_titles, "omega_scf", pba->has_scf); class_store_columntitle(ppt->scalar_titles, "delta0_scf", pba->has_scf); class_store_columntitle(ppt->scalar_titles, "delta1_scf", pba->has_scf); ppt->number_of_scalar_titles = get_number_of_titles(ppt->scalar_titles); } if (ppt->has_tensors == _TRUE_){ class_store_columntitle(ppt->tensor_titles,"tau [Mpc]",_TRUE_); class_store_columntitle(ppt->tensor_titles,"a",_TRUE_); class_store_columntitle(ppt->tensor_titles,"delta_g",_TRUE_); class_store_columntitle(ppt->tensor_titles,"shear_g",_TRUE_); class_store_columntitle(ppt->tensor_titles,"l4_g",_TRUE_); class_store_columntitle(ppt->tensor_titles,"pol0_g",_TRUE_); class_store_columntitle(ppt->tensor_titles,"pol2_g",_TRUE_); class_store_columntitle(ppt->tensor_titles,"pol4_g",_TRUE_); class_store_columntitle(ppt->tensor_titles,"H (gw)",_TRUE_); class_store_columntitle(ppt->tensor_titles,"Hdot (gwdot)",_TRUE_); 
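      /* Ultra-relativistic species (only if tensor perturbations of ur are evolved) */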
class_store_columntitle(ppt->tensor_titles,"delta_ur",ppt->evolve_tensor_ur); class_store_columntitle(ppt->tensor_titles,"shear_ur",ppt->evolve_tensor_ur); class_store_columntitle(ppt->tensor_titles,"l4_ur",ppt->evolve_tensor_ur); if (ppt->evolve_tensor_ncdm == _TRUE_) { for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ sprintf(tmp,"delta_ncdm[%d]",n_ncdm); class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_); sprintf(tmp,"theta_ncdm[%d]",n_ncdm); class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_); sprintf(tmp,"shear_ncdm[%d]",n_ncdm); class_store_columntitle(ppt->tensor_titles,tmp,_TRUE_); } } ppt->number_of_tensor_titles = get_number_of_titles(ppt->tensor_titles); } } return _SUCCESS_; } /** * For a given mode and wavenumber, find the number of intervals of * time between tau_ini and tau_end such that the approximation * scheme (and the number of perturbation equations) is uniform. * * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to the thermodynamics structure * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param k Input: index of wavenumber * @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces * @param tau_ini Input: initial time of the perturbation integration * @param tau_end Input: final time of the perturbation integration * @param interval_number Output: total number of intervals * @param interval_number_of Output: number of intervals with respect to each particular approximation * @return the error status */ int perturb_find_approximation_number( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt, int index_md, double k, struct perturb_workspace * ppw, double tau_ini, double tau_end, int * interval_number, int * interval_number_of /* interval_number_of[index_ap] (already allocated) */ ){ /** Summary: */ /* index running over approximations */ int index_ap; /* value of a given approximation at tau_ini and tau_end */ int flag_ini,flag_end; /** - fix default number of intervals to one (if no approximation switch) */ *interval_number=1; /** - loop over each approximation and add the number of approximation switching times */ for (index_ap=0; index_ap<ppw->ap_size; index_ap++) { class_call(perturb_approximations(ppr, pba, pth, ppt, index_md, k, tau_ini, ppw), ppt->error_message, ppt->error_message); flag_ini = ppw->approx[index_ap]; class_call(perturb_approximations(ppr, pba, pth, ppt, index_md, k, tau_end, ppw), ppt->error_message, ppt->error_message); flag_end = ppw->approx[index_ap]; class_test(flag_end<flag_ini, ppt->error_message, "For each approximation scheme, the declaration of approximation labels in the enumeration must follow chronological order, e.g: enum approx_flags {flag1, flag2, flag3} with flag1 being the initial one and flag3 the final one"); *interval_number += flag_end-flag_ini; interval_number_of[index_ap] = flag_end-flag_ini+1; } return _SUCCESS_; } /** * For a given mode and wavenumber, find the values of time at which * the approximation changes. 
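 *
 * Each switching time is bracketed by bisection between tau_ini and tau_end
 * down to the requested precision; the switches are then sorted in
 * chronological order, and the approximation flags are re-evaluated in the
 * middle of each resulting interval to fill interval_approx[][].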
* * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to the thermodynamics structure * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param k Input: index of wavenumber * @param ppw Input: pointer to perturb_workspace structure containing index values and workspaces * @param tau_ini Input: initial time of the perturbation integration * @param tau_end Input: final time of the perturbation integration * @param precision Input: tolerance on output values * @param interval_number Input: total number of intervals * @param interval_number_of Input: number of intervals with respect to each particular approximation * @param interval_limit Output: value of time at the boundary of the intervals: tau_ini, tau_switch1, ..., tau_end * @param interval_approx Output: value of approximations in each interval * @return the error status */ int perturb_find_approximation_switches( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt, int index_md, double k, struct perturb_workspace * ppw, double tau_ini, double tau_end, double precision, int interval_number, int * interval_number_of, double * interval_limit, /* interval_limit[index_interval] (already allocated) */ int ** interval_approx /* interval_approx[index_interval][index_ap] (already allocated) */ ){ /** Summary: */ int index_ap; int index_switch; int index_switch_tot; int num_switch; double tau_min,lower_bound,upper_bound; double mid=0; double * unsorted_tau_switch; double next_tau_switch; int flag_ini; int num_switching_at_given_time; /** - write in output arrays the initial time and approximation */ interval_limit[0]=tau_ini; class_call(perturb_approximations(ppr, pba, pth, ppt, index_md, k, tau_ini, ppw), ppt->error_message, ppt->error_message); for (index_ap=0; index_ap<ppw->ap_size; index_ap++) interval_approx[0][index_ap]=ppw->approx[index_ap]; /** - if there are no approximation switches, just write final time and return */ if (interval_number == 1) { interval_limit[1]=tau_end; } /** - if there are switches, consider approximations one after each other. Find switching time by bisection. 
Store all switches in arbitrary order in array unsorted_tau_switch[ ] */ else { class_alloc(unsorted_tau_switch,(interval_number-1)*sizeof(double),ppt->error_message); index_switch_tot=0; for (index_ap=0; index_ap<ppw->ap_size; index_ap++) { if (interval_number_of[index_ap] > 1) { num_switch = interval_number_of[index_ap]-1; tau_min = tau_ini; flag_ini = interval_approx[0][index_ap]; for (index_switch=0; index_switch<num_switch; index_switch++) { lower_bound=tau_min; upper_bound=tau_end; mid = 0.5*(lower_bound+upper_bound); while (upper_bound - lower_bound > precision) { class_call(perturb_approximations(ppr, pba, pth, ppt, index_md, k, mid, ppw), ppt->error_message, ppt->error_message); if (ppw->approx[index_ap] > flag_ini+index_switch) { upper_bound=mid; } else { lower_bound=mid; } mid = 0.5*(lower_bound+upper_bound); } unsorted_tau_switch[index_switch_tot]=mid; index_switch_tot++; tau_min=mid; } } } class_test(index_switch_tot != (interval_number-1), ppt->error_message, "bug in approximation switch search routine: should have %d = %d", index_switch_tot,interval_number-1); /** - now sort interval limits in correct order */ index_switch_tot=1; while (index_switch_tot < interval_number) { next_tau_switch=tau_end; for (index_switch=0; index_switch<interval_number-1; index_switch++) { if ((unsorted_tau_switch[index_switch] > interval_limit[index_switch_tot-1]) && (unsorted_tau_switch[index_switch] < next_tau_switch)) { next_tau_switch=unsorted_tau_switch[index_switch]; } } interval_limit[index_switch_tot]=next_tau_switch; index_switch_tot++; } interval_limit[index_switch_tot]=tau_end; class_test(index_switch_tot != interval_number, ppt->error_message, "most probably two approximation switching time were found to be equal, which cannot be handled\n"); /** - store each approximation in chronological order */ for (index_switch=1; index_switch<interval_number; index_switch++) { class_call(perturb_approximations(ppr, pba, pth, ppt, index_md, k, 0.5*(interval_limit[index_switch]+interval_limit[index_switch+1]), ppw), ppt->error_message, ppt->error_message); for (index_ap=0; index_ap<ppw->ap_size; index_ap++) { interval_approx[index_switch][index_ap]=ppw->approx[index_ap]; /* check here that approximation does not go backward (remember that by definition the value of an approximation can only increase) */ class_test(interval_approx[index_switch][index_ap] < interval_approx[index_switch-1][index_ap], ppt->error_message, "The approximation with label %d is not defined correctly: it goes backward (from %d to %d) for k=%e and between tau=%e and %e; this cannot be handled\n", index_ap, interval_approx[index_switch-1][index_ap], interval_approx[index_switch][index_ap], k, 0.5*(interval_limit[index_switch-1]+interval_limit[index_switch]), 0.5*(interval_limit[index_switch]+interval_limit[index_switch+1]) ); } /* check here that more than one approximation is not switched on at a given time */ num_switching_at_given_time=0; for (index_ap=0; index_ap<ppw->ap_size; index_ap++) { if (interval_approx[index_switch][index_ap] != interval_approx[index_switch-1][index_ap]) num_switching_at_given_time++; } class_test(num_switching_at_given_time != 1, ppt->error_message, "for k=%e, at tau=%g, you switch %d approximations at the same time, this cannot be handled. 
Usually happens in two cases: triggers for different approximations coincide, or one approx is reversible\n", k, interval_limit[index_switch], num_switching_at_given_time); if (ppt->perturbations_verbose>2) { if (_scalars_) { if ((interval_approx[index_switch-1][ppw->index_ap_tca]==(int)tca_on) && (interval_approx[index_switch][ppw->index_ap_tca]==(int)tca_off)) fprintf(stdout,"Mode k=%e: will switch off tight-coupling approximation at tau=%e\n",k,interval_limit[index_switch]); //fprintf(stderr,"Mode k=%e: will switch off tight-coupling approximation at tau=%e\n",k,interval_limit[index_switch]); //TBC if ((interval_approx[index_switch-1][ppw->index_ap_rsa]==(int)rsa_off) && (interval_approx[index_switch][ppw->index_ap_rsa]==(int)rsa_on)) fprintf(stdout,"Mode k=%e: will switch on radiation streaming approximation at tau=%e\n",k,interval_limit[index_switch]); if (pba->has_ur == _TRUE_) { if ((interval_approx[index_switch-1][ppw->index_ap_ufa]==(int)ufa_off) && (interval_approx[index_switch][ppw->index_ap_ufa]==(int)ufa_on)) { fprintf(stdout,"Mode k=%e: will switch on ur fluid approximation at tau=%e\n",k,interval_limit[index_switch]); } } if (pba->has_ncdm == _TRUE_) { if ((interval_approx[index_switch-1][ppw->index_ap_ncdmfa]==(int)ncdmfa_off) && (interval_approx[index_switch][ppw->index_ap_ncdmfa]==(int)ncdmfa_on)) { fprintf(stdout,"Mode k=%e: will switch on ncdm fluid approximation at tau=%e\n",k,interval_limit[index_switch]); } } } if (_tensors_) { if ((interval_approx[index_switch-1][ppw->index_ap_tca]==(int)tca_on) && (interval_approx[index_switch][ppw->index_ap_tca]==(int)tca_off)) fprintf(stdout,"Mode k=%e: will switch off tight-coupling approximation for tensors at tau=%e\n",k,interval_limit[index_switch]); if ((interval_approx[index_switch-1][ppw->index_ap_rsa]==(int)rsa_off) && (interval_approx[index_switch][ppw->index_ap_rsa]==(int)rsa_on)) fprintf(stdout,"Mode k=%e: will switch on radiation streaming approximation for tensors at tau=%e\n",k,interval_limit[index_switch]); } } } free(unsorted_tau_switch); class_call(perturb_approximations(ppr, pba, pth, ppt, index_md, k, tau_end, ppw), ppt->error_message, ppt->error_message); } return _SUCCESS_; } /** * Initialize the field '-->pv' of a perturb_workspace structure, which * is a perturb_vector structure. This structure contains indices and * values of all quantities which need to be integrated with respect * to time (and only them: quantities fixed analytically or obeying * constraint equations are NOT included in this vector). This routine * distinguishes between two cases: * * --> the input pa_old is set to the NULL pointer: * * This happens when we start integrating over a new wavenumber and we * want to set initial conditions for the perturbations. Then, it is * assumed that ppw-->pv is not yet allocated. This routine allocates * it, defines all indices, and then fills the vector ppw-->pv-->y with * the initial conditions defined in perturb_initial_conditions. * * --> the input pa_old is not set to the NULL pointer and describes * some set of approximations: * * This happens when we need to change approximation scheme while * integrating over a given wavenumber. The new approximation * described by ppw-->pa is then different from pa_old. 
Then, this * routine allocates a new vector with a new size and new index * values; it fills this vector with initial conditions taken from the * previous vector passed as an input in ppw-->pv, and eventually with * some analytic approximations for the new variables appearing at * this time; then the new vector comes in replacement of the old one, * which is freed. * * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to the thermodynamics structure * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param index_ic Input: index of initial condition under consideration (ad, iso...) * @param k Input: wavenumber * @param tau Input: conformal time * @param ppw Input/Output: workspace containing in input the approximation scheme, the background/thermodynamics/metric quantities, and eventually the previous vector y; and in output the new vector y. * @param pa_old Input: NULL is we need to set y to initial conditions for a new wavenumber; points towards a perturb_approximations if we want to switch of approximation. * @return the error status */ int perturb_vector_init( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt, int index_md, int index_ic, double k, double tau, struct perturb_workspace * ppw, /* ppw->pv unallocated if pa_old = NULL, allocated and filled otherwise */ int * pa_old ) { /** Summary: */ /** - define local variables */ struct perturb_vector * ppv; int index_pt; int l; int n_ncdm,index_q,ncdm_l_size; double rho_plus_p_ncdm,q,q2,epsilon,a,factor; /** - allocate a new perturb_vector structure to which ppw-->pv will point at the end of the routine */ class_alloc(ppv,sizeof(struct perturb_vector),ppt->error_message); /** - initialize pointers to NULL (they will be allocated later if needed), relevant for perturb_vector_free() */ ppv->l_max_ncdm = NULL; ppv->q_size_ncdm = NULL; /** - define all indices in this new vector (depends on approximation scheme, described by the input structure ppw-->pa) */ index_pt = 0; if (_scalars_) { /* reject inconsistent values of the number of mutipoles in photon temperature hierarchy */ class_test(ppr->l_max_g < 4, ppt->error_message, "ppr->l_max_g should be at least 4, i.e. we must integrate at least over photon density, velocity, shear, third and fourth momentum"); /* reject inconsistent values of the number of mutipoles in photon polarization hierarchy */ class_test(ppr->l_max_pol_g < 4, ppt->error_message, "ppr->l_max_pol_g should be at least 4"); /* reject inconsistent values of the number of mutipoles in decay radiation hierarchy */ if (pba->has_dr == _TRUE_) { class_test(ppr->l_max_dr < 4, ppt->error_message, "ppr->l_max_dr should be at least 4, i.e. we must integrate at least over neutrino/relic density, velocity, shear, third and fourth momentum"); } /* reject inconsistent values of the number of mutipoles in ultra relativistic neutrino hierarchy */ if (pba->has_ur == _TRUE_) { class_test(ppr->l_max_ur < 4, ppt->error_message, "ppr->l_max_ur should be at least 4, i.e. 
we must integrate at least over neutrino/relic density, velocity, shear, third and fourth momentum"); } /* photons */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */ /* temperature */ ppv->l_max_g = ppr->l_max_g; class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */ class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */ class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* higher momenta */ /* polarization */ ppv->l_max_pol_g = ppr->l_max_pol_g; class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1); class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1); class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1); class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2); } } /* baryons */ class_define_index(ppv->index_pt_delta_b,_TRUE_,index_pt,1); /* baryon density */ class_define_index(ppv->index_pt_theta_b,_TRUE_,index_pt,1); /* baryon velocity */ /* cdm */ class_define_index(ppv->index_pt_delta_cdm,pba->has_cdm,index_pt,1); /* cdm density */ class_define_index(ppv->index_pt_theta_cdm,pba->has_cdm && (ppt->gauge == newtonian),index_pt,1); /* cdm velocity */ /* dcdm */ class_define_index(ppv->index_pt_delta_dcdm,pba->has_dcdm,index_pt,1); /* dcdm density */ class_define_index(ppv->index_pt_theta_dcdm,pba->has_dcdm,index_pt,1); /* dcdm velocity */ /* ultra relativistic decay radiation */ if (pba->has_dr==_TRUE_){ ppv->l_max_dr = ppr->l_max_dr; class_define_index(ppv->index_pt_F0_dr,_TRUE_,index_pt,ppv->l_max_dr+1); /* all momenta in Boltzmann hierarchy */ } /* fluid */ class_define_index(ppv->index_pt_delta_fld,pba->has_fld,index_pt,1); /* fluid density */ class_define_index(ppv->index_pt_theta_fld,pba->has_fld,index_pt,1); /* fluid velocity */ /* scalar field */ class_define_index(ppv->index_pt_omega_scf,pba->has_scf,index_pt,1); /* scalar field oscillating frequency */ class_define_index(ppv->index_pt_delta0_scf,pba->has_scf,index_pt,1); /* scalar field first density contrast */ class_define_index(ppv->index_pt_delta1_scf,pba->has_scf,index_pt,1); /* scalar field second density contrast */ /* perturbed recombination: the indices are defined once tca is off. 
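      (their initial values are assigned at the moment when tca is switched off,
      in the corresponding block of the approximation-switching case below)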
*/ if ( (ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){ class_define_index(ppv->index_pt_perturbed_recombination_delta_temp,_TRUE_,index_pt,1); class_define_index(ppv->index_pt_perturbed_recombination_delta_chi,_TRUE_,index_pt,1); } /* ultra relativistic neutrinos */ if (pba->has_ur && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) { class_define_index(ppv->index_pt_delta_ur,_TRUE_,index_pt,1); /* density of ultra-relativistic neutrinos/relics */ class_define_index(ppv->index_pt_theta_ur,_TRUE_,index_pt,1); /* velocity of ultra-relativistic neutrinos/relics */ class_define_index(ppv->index_pt_shear_ur,_TRUE_,index_pt,1); /* shear of ultra-relativistic neutrinos/relics */ if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) { ppv->l_max_ur = ppr->l_max_ur; class_define_index(ppv->index_pt_l3_ur,_TRUE_,index_pt,ppv->l_max_ur-2); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3) */ } } /* non-cold dark matter */ if (pba->has_ncdm == _TRUE_) { ppv->index_pt_psi0_ncdm1 = index_pt; /* density of ultra-relativistic neutrinos/relics */ ppv->N_ncdm = pba->N_ncdm; class_alloc(ppv->l_max_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message); class_alloc(ppv->q_size_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message); for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){ // Set value of ppv->l_max_ncdm: if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_off){ /* reject inconsistent values of the number of mutipoles in ultra relativistic neutrino hierarchy */ class_test(ppr->l_max_ncdm < 4, ppt->error_message, "ppr->l_max_ncdm=%d should be at least 4, i.e. we must integrate at least over first four momenta of non-cold dark matter perturbed phase-space distribution",n_ncdm); //Copy value from precision parameter: ppv->l_max_ncdm[n_ncdm] = ppr->l_max_ncdm; ppv->q_size_ncdm[n_ncdm] = pba->q_size_ncdm[n_ncdm]; } else{ // In the fluid approximation, hierarchy is cut at lmax = 2 and q dependence is integrated out: ppv->l_max_ncdm[n_ncdm] = 2; ppv->q_size_ncdm[n_ncdm] = 1; } index_pt += (ppv->l_max_ncdm[n_ncdm]+1)*ppv->q_size_ncdm[n_ncdm]; } } /* metric (only quantities to be integrated, not those obeying constraint equations) */ /* metric perturbation eta of synchronous gauge */ class_define_index(ppv->index_pt_eta,ppt->gauge == synchronous,index_pt,1); /* metric perturbation phi of newtonian gauge ( we could fix it using Einstein equations as a constraint equation for phi, but integration is numerically more stable if we actually evolve phi) */ class_define_index(ppv->index_pt_phi,ppt->gauge == newtonian,index_pt,1); } if (_vectors_) { /* Vector baryon velocity: v_b^{(1)}. 
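         It is always part of the evolved vector, independently of the photon
         approximation scheme.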
*/ class_define_index(ppv->index_pt_theta_b,_TRUE_,index_pt,1); /* eventually reject inconsistent values of the number of mutipoles in photon temperature hierarchy and polarization*/ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */ ppv->l_max_g = ppr->l_max_g_ten; class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */ class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */ class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */ class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* photon l=3 */ ppv->l_max_pol_g = ppr->l_max_pol_g_ten; class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1); /* photon polarization, l=0 */ class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1); /* photon polarization, l=1 */ class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1); /* photon polarization, l=2 */ class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2); /* photon polarization, l=3 */ } } /** - (a) metric perturbations V or \f$ h_v \f$ depending on gauge */ if (ppt->gauge == synchronous){ class_define_index(ppv->index_pt_hv_prime,_TRUE_,index_pt,1); } if (ppt->gauge == newtonian){ class_define_index(ppv->index_pt_V,_TRUE_,index_pt,1); } } if (_tensors_) { /* reject inconsistent values of the number of mutipoles in photon temperature hierarchy */ class_test(ppr->l_max_g_ten < 4, ppt->error_message, "ppr->l_max_g_ten should be at least 4, i.e. we must integrate at least over photon density, velocity, shear, third momentum"); /* reject inconsistent values of the number of mutipoles in photon polarization hierarchy */ class_test(ppr->l_max_pol_g_ten < 4, ppt->error_message, "ppr->l_max_pol_g_ten should be at least 4"); if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */ ppv->l_max_g = ppr->l_max_g_ten; class_define_index(ppv->index_pt_delta_g,_TRUE_,index_pt,1); /* photon density */ class_define_index(ppv->index_pt_theta_g,_TRUE_,index_pt,1); /* photon velocity */ class_define_index(ppv->index_pt_shear_g,_TRUE_,index_pt,1); /* photon shear */ class_define_index(ppv->index_pt_l3_g,_TRUE_,index_pt,ppv->l_max_g-2); /* photon l=3 */ ppv->l_max_pol_g = ppr->l_max_pol_g_ten; class_define_index(ppv->index_pt_pol0_g,_TRUE_,index_pt,1); /* photon polarization, l=0 */ class_define_index(ppv->index_pt_pol1_g,_TRUE_,index_pt,1); /* photon polarization, l=1 */ class_define_index(ppv->index_pt_pol2_g,_TRUE_,index_pt,1); /* photon polarization, l=2 */ class_define_index(ppv->index_pt_pol3_g,_TRUE_,index_pt,ppv->l_max_pol_g-2); /* photon polarization, l=3 */ } } /* ultra relativistic neutrinos */ class_define_index(ppv->index_pt_delta_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur density */ class_define_index(ppv->index_pt_theta_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur velocity */ class_define_index(ppv->index_pt_shear_ur,ppt->evolve_tensor_ur,index_pt,1); /* ur shear */ ppv->l_max_ur = ppr->l_max_ur; class_define_index(ppv->index_pt_l3_ur,ppt->evolve_tensor_ur,index_pt,ppv->l_max_ur-2); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3) */ if (ppt->evolve_tensor_ncdm == _TRUE_) { ppv->index_pt_psi0_ncdm1 = index_pt; ppv->N_ncdm = pba->N_ncdm; 
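      /* as in the scalar case, store one l_max and one momentum-grid size per
         ncdm species; these counters are integer-valued but allocated below with
         sizeof(double), so the allocation is simply a bit larger than needed,
         which is harmless */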
class_alloc(ppv->l_max_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message); class_alloc(ppv->q_size_ncdm,ppv->N_ncdm*sizeof(double),ppt->error_message); for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){ // Set value of ppv->l_max_ncdm: class_test(ppr->l_max_ncdm < 4, ppt->error_message, "ppr->l_max_ncdm=%d should be at least 4, i.e. we must integrate at least over first four momenta of non-cold dark matter perturbed phase-space distribution",n_ncdm); //Copy value from precision parameter: ppv->l_max_ncdm[n_ncdm] = ppr->l_max_ncdm; ppv->q_size_ncdm[n_ncdm] = pba->q_size_ncdm[n_ncdm]; index_pt += (ppv->l_max_ncdm[n_ncdm]+1)*ppv->q_size_ncdm[n_ncdm]; } } /** - (b) metric perturbation h is a propagating degree of freedom, so h and hdot are included in the vector of ordinary perturbations, no in that of metric perturbations */ class_define_index(ppv->index_pt_gw,_TRUE_,index_pt,1); /* tensor metric perturbation h (gravitational waves) */ class_define_index(ppv->index_pt_gwdot,_TRUE_,index_pt,1); /* its time-derivative */ } ppv->pt_size = index_pt; /** - allocate vectors for storing the values of all these quantities and their time-derivatives at a given time */ class_calloc(ppv->y,ppv->pt_size,sizeof(double),ppt->error_message); class_alloc(ppv->dy,ppv->pt_size*sizeof(double),ppt->error_message); class_alloc(ppv->used_in_sources,ppv->pt_size*sizeof(int),ppt->error_message); /** - specify which perturbations are needed in the evaluation of source terms */ /* take all of them by default */ for (index_pt=0; index_pt<ppv->pt_size; index_pt++) ppv->used_in_sources[index_pt] = _TRUE_; /* indicate which ones are not needed (this is just for saving time, omitting perturbations in this list will not change the results!) */ if (_scalars_) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* we don't need temperature multipoles above l=2 (but they are defined only when rsa and tca are off) */ for (index_pt=ppv->index_pt_l3_g; index_pt <= ppv->index_pt_delta_g+ppv->l_max_g; index_pt++) ppv->used_in_sources[index_pt]=_FALSE_; /* for polarization, we only need l=0,2 (but l =1,3, ... 
are defined only when rsa and tca are off) */ ppv->used_in_sources[ppv->index_pt_pol1_g]=_FALSE_; for (index_pt=ppv->index_pt_pol3_g; index_pt <= ppv->index_pt_pol0_g+ppv->l_max_pol_g; index_pt++) ppv->used_in_sources[index_pt]=_FALSE_; } } if (pba->has_ur == _TRUE_) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) { /* we don't need ur multipoles above l=2 (but they are defined only when rsa and ufa are off) */ for (index_pt=ppv->index_pt_l3_ur; index_pt <= ppv->index_pt_delta_ur+ppv->l_max_ur; index_pt++) ppv->used_in_sources[index_pt]=_FALSE_; } } } if (pba->has_ncdm == _TRUE_) { /* we don't need ncdm multipoles above l=2 (but they are defined only when ncdmfa is off) */ index_pt = ppv->index_pt_psi0_ncdm1; for(n_ncdm = 0; n_ncdm < ppv-> N_ncdm; n_ncdm++){ for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){ for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){ if (l>2) ppv->used_in_sources[index_pt]=_FALSE_; index_pt++; } } } } } if (_tensors_) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* we don't need temperature multipoles above except l=0,2,4 */ ppv->used_in_sources[ppv->index_pt_theta_g]=_FALSE_; ppv->used_in_sources[ppv->index_pt_l3_g]=_FALSE_; for (index_pt=ppv->index_pt_delta_g+5; index_pt <= ppv->index_pt_delta_g+ppv->l_max_g; index_pt++) ppv->used_in_sources[index_pt]=_FALSE_; /* same for polarization, we only need l=0,2,4 */ ppv->used_in_sources[ppv->index_pt_pol1_g]=_FALSE_; ppv->used_in_sources[ppv->index_pt_pol3_g]=_FALSE_; for (index_pt=ppv->index_pt_pol0_g+5; index_pt <= ppv->index_pt_pol0_g+ppv->l_max_pol_g; index_pt++) ppv->used_in_sources[index_pt]=_FALSE_; } } /* we need h' but not h */ ppv->used_in_sources[ppv->index_pt_gw]=_FALSE_; } /** - case of setting initial conditions for a new wavenumber */ if (pa_old == NULL) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: initializing vector at tau=%e\n",k,tau); if (_scalars_) { /** - --> (a) check that current approximation scheme is consistent with initial conditions */ class_test(ppw->approx[ppw->index_ap_rsa] == (int)rsa_on, ppt->error_message, "scalar initial conditions assume radiation streaming approximation turned off"); if (pba->has_ur == _TRUE_) { class_test(ppw->approx[ppw->index_ap_ufa] == (int)ufa_on, ppt->error_message, "scalar initial conditions assume ur fluid approximation turned off"); } if (pba->has_ncdm == _TRUE_) { class_test(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on, ppt->error_message, "scalar initial conditions assume ncdm fluid approximation turned off"); } class_test(ppw->approx[ppw->index_ap_tca] == (int)tca_off, ppt->error_message, "scalar initial conditions assume tight-coupling approximation turned on"); } if (_tensors_) { class_test(ppw->approx[ppw->index_ap_tca] == (int)tca_off, ppt->error_message, "tensor initial conditions assume tight-coupling approximation turned on"); class_test(ppw->approx[ppw->index_ap_rsa] == (int)rsa_on, ppt->error_message, "tensor initial conditions assume radiation streaming approximation turned off"); } /** - --> (b) let ppw-->pv points towards the perturb_vector structure that we just created */ ppw->pv = ppv; /** - --> (c) fill the vector ppw-->pv-->y with appropriate initial conditions */ class_call(perturb_initial_conditions(ppr, pba, ppt, index_md, index_ic, k, tau, ppw), ppt->error_message, ppt->error_message); } /** - case of switching approximation while a 
wavenumber is being integrated */ else { /** - --> (a) for the scalar mode: */ if (_scalars_) { /** - ---> (a.1.) check that the change of approximation scheme makes sense (note: before calling this routine there is already a check that we wish to change only one approximation flag at a time) */ class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on), ppt->error_message, "at tau=%g: the tight-coupling approximation can be switched off, not on",tau); /** - ---> (a.2.) some variables (b, cdm, fld, ...) are not affected by any approximation. They need to be reconducted whatever the approximation switching is. We treat them here. Below we will treat other variables case by case. */ ppv->y[ppv->index_pt_delta_b] = ppw->pv->y[ppw->pv->index_pt_delta_b]; ppv->y[ppv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_b]; if (pba->has_cdm == _TRUE_) { ppv->y[ppv->index_pt_delta_cdm] = ppw->pv->y[ppw->pv->index_pt_delta_cdm]; if (ppt->gauge == newtonian) { ppv->y[ppv->index_pt_theta_cdm] = ppw->pv->y[ppw->pv->index_pt_theta_cdm]; } } if (pba->has_dcdm == _TRUE_) { ppv->y[ppv->index_pt_delta_dcdm] = ppw->pv->y[ppw->pv->index_pt_delta_dcdm]; ppv->y[ppv->index_pt_theta_dcdm] = ppw->pv->y[ppw->pv->index_pt_theta_dcdm]; } if (pba->has_dr == _TRUE_){ for (l=0; l <= ppv->l_max_dr; l++) ppv->y[ppv->index_pt_F0_dr+l] = ppw->pv->y[ppw->pv->index_pt_F0_dr+l]; } if (pba->has_fld == _TRUE_) { ppv->y[ppv->index_pt_delta_fld] = ppw->pv->y[ppw->pv->index_pt_delta_fld]; ppv->y[ppv->index_pt_theta_fld] = ppw->pv->y[ppw->pv->index_pt_theta_fld]; } if (pba->has_scf == _TRUE_) { ppv->y[ppv->index_pt_omega_scf] = ppw->pv->y[ppw->pv->index_pt_omega_scf]; ppv->y[ppv->index_pt_delta0_scf] = ppw->pv->y[ppw->pv->index_pt_delta0_scf]; ppv->y[ppv->index_pt_delta1_scf] = ppw->pv->y[ppw->pv->index_pt_delta1_scf]; } if (ppt->gauge == synchronous) ppv->y[ppv->index_pt_eta] = ppw->pv->y[ppw->pv->index_pt_eta]; if (ppt->gauge == newtonian) ppv->y[ppv->index_pt_phi] = ppw->pv->y[ppw->pv->index_pt_phi]; /* -- case of switching off tight coupling approximation. 
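         The photon shear, the higher temperature multipoles and the polarization
         hierarchy were not evolved while tca was on; below they are started from
         their first/second-order tight-coupling values, using the shear
         ppw->tca_shear_g computed in perturb_derivs.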
Provide correct initial conditions to new set of variables */ if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau); ppv->y[ppv->index_pt_delta_g] = ppw->pv->y[ppw->pv->index_pt_delta_g]; ppv->y[ppv->index_pt_theta_g] = ppw->pv->y[ppw->pv->index_pt_theta_g]; /* tight-coupling approximation for shear_g (previously computed in perturb_derivs: perturb_derivs is always called at the end of generic_evolver, in order to update all quantities in ppw to the time at which the approximation is switched off) */ ppv->y[ppv->index_pt_shear_g] = ppw->tca_shear_g; ppv->y[ppv->index_pt_l3_g] = 6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->s_l[3]*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for l=3 */ ppv->y[ppv->index_pt_pol0_g] = 2.5*ppv->y[ppv->index_pt_shear_g]; /* first-order tight-coupling approximation for polarization, l=0 */ ppv->y[ppv->index_pt_pol1_g] = k/ppw->pvecthermo[pth->index_th_dkappa]*(5.-2.*ppw->s_l[2])/6.*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for polarization, l=1 */ ppv->y[ppv->index_pt_pol2_g] = 0.5*ppv->y[ppv->index_pt_shear_g]; /* first-order tight-coupling approximation for polarization, l=2 */ ppv->y[ppv->index_pt_pol3_g] = k/ppw->pvecthermo[pth->index_th_dkappa]*3.*ppw->s_l[3]/14.*ppv->y[ppv->index_pt_shear_g]; /* second-order tight-coupling approximation for polarization, l=3 */ if (pba->has_ur == _TRUE_) { ppv->y[ppv->index_pt_delta_ur] = ppw->pv->y[ppw->pv->index_pt_delta_ur]; ppv->y[ppv->index_pt_theta_ur] = ppw->pv->y[ppw->pv->index_pt_theta_ur]; ppv->y[ppv->index_pt_shear_ur] = ppw->pv->y[ppw->pv->index_pt_shear_ur]; if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) { ppv->y[ppv->index_pt_l3_ur] = ppw->pv->y[ppw->pv->index_pt_l3_ur]; for (l=4; l <= ppv->l_max_ur; l++) ppv->y[ppv->index_pt_delta_ur+l] = ppw->pv->y[ppw->pv->index_pt_delta_ur+l]; } } if (pba->has_ncdm == _TRUE_) { index_pt = 0; for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){ for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){ for(l=0; l<=ppv->l_max_ncdm[n_ncdm];l++){ // This is correct with or without ncdmfa, since ppv->lmax_ncdm is set accordingly. ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] = ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt]; index_pt++; } } } } /* perturbed recombination */ /* the initial conditions are set when tca is switched off (current block) */ if (ppt->has_perturbed_recombination == _TRUE_){ ppv->y[ppv->index_pt_perturbed_recombination_delta_temp] = 1./3.*ppv->y[ppw->pv->index_pt_delta_b]; ppv->y[ppv->index_pt_perturbed_recombination_delta_chi] =0.; } } // end of block tca ON -> tca OFF /* perturbed recombination */ /* For any other transition in the approximation scheme, we should just copy the value of the perturbations, provided tca is already off (otherwise the indices are not yet allocated). For instance, we do not want to copy the values in the (k,tau) region where both UFA and TCA are engaged.*/ if ((ppt->has_perturbed_recombination == _TRUE_)&&(pa_old[ppw->index_ap_tca]==(int)tca_off)){ ppv->y[ppv->index_pt_perturbed_recombination_delta_temp] = ppw->pv->y[ppw->pv->index_pt_perturbed_recombination_delta_temp]; ppv->y[ppv->index_pt_perturbed_recombination_delta_chi] = ppw->pv->y[ppw->pv->index_pt_perturbed_recombination_delta_chi]; } /* -- case of switching on radiation streaming approximation. 
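         Once rsa is on, the photon and ultra-relativistic multipoles are no longer
         evolved, so only the ncdm hierarchy (if present) needs to be copied into
         the new vector.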
Provide correct initial conditions to new set of variables */ if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]); if (pba->has_ncdm == _TRUE_) { index_pt = 0; for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){ for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){ for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){ ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] = ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt]; index_pt++; } } } } } /* -- case of switching on ur fluid approximation. Provide correct initial conditions to new set of variables */ if (pba->has_ur == _TRUE_) { if ((pa_old[ppw->index_ap_ufa] == (int)ufa_off) && (ppw->approx[ppw->index_ap_ufa] == (int)ufa_on)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch on ur fluid approximation at tau=%e\n",k,tau); if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { ppv->y[ppv->index_pt_delta_g] = ppw->pv->y[ppw->pv->index_pt_delta_g]; ppv->y[ppv->index_pt_theta_g] = ppw->pv->y[ppw->pv->index_pt_theta_g]; } if ((ppw->approx[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) { ppv->y[ppv->index_pt_shear_g] = ppw->pv->y[ppw->pv->index_pt_shear_g]; ppv->y[ppv->index_pt_l3_g] = ppw->pv->y[ppw->pv->index_pt_l3_g]; for (l = 4; l <= ppw->pv->l_max_g; l++) { ppv->y[ppv->index_pt_delta_g+l] = ppw->pv->y[ppw->pv->index_pt_delta_g+l]; } ppv->y[ppv->index_pt_pol0_g] = ppw->pv->y[ppw->pv->index_pt_pol0_g]; ppv->y[ppv->index_pt_pol1_g] = ppw->pv->y[ppw->pv->index_pt_pol1_g]; ppv->y[ppv->index_pt_pol2_g] = ppw->pv->y[ppw->pv->index_pt_pol2_g]; ppv->y[ppv->index_pt_pol3_g] = ppw->pv->y[ppw->pv->index_pt_pol3_g]; for (l = 4; l <= ppw->pv->l_max_pol_g; l++) { ppv->y[ppv->index_pt_pol0_g+l] = ppw->pv->y[ppw->pv->index_pt_pol0_g+l]; } } if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { ppv->y[ppv->index_pt_delta_ur] = ppw->pv->y[ppw->pv->index_pt_delta_ur]; ppv->y[ppv->index_pt_theta_ur] = ppw->pv->y[ppw->pv->index_pt_theta_ur]; ppv->y[ppv->index_pt_shear_ur] = ppw->pv->y[ppw->pv->index_pt_shear_ur]; } if (pba->has_ncdm == _TRUE_) { index_pt = 0; for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){ for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){ for(l=0; l<=ppv->l_max_ncdm[n_ncdm]; l++){ /* This is correct even when ncdmfa == off, since ppv->l_max_ncdm and ppv->q_size_ncdm is updated.*/ ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] = ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt]; index_pt++; } } } } } } /* -- case of switching on ncdm fluid approximation. 
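         The full phase-space hierarchy of each ncdm species is collapsed into
         three fluid moments (density, velocity, shear) by integrating over the
         momentum grid q with the quadrature weights w_ncdm, as done below.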
Provide correct initial conditions to new set of variables */ if (pba->has_ncdm == _TRUE_) { if ((pa_old[ppw->index_ap_ncdmfa] == (int)ncdmfa_off) && (ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch on ncdm fluid approximation at tau=%e\n",k,tau); if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { ppv->y[ppv->index_pt_delta_g] = ppw->pv->y[ppw->pv->index_pt_delta_g]; ppv->y[ppv->index_pt_theta_g] = ppw->pv->y[ppw->pv->index_pt_theta_g]; } if ((ppw->approx[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off)) { ppv->y[ppv->index_pt_shear_g] = ppw->pv->y[ppw->pv->index_pt_shear_g]; ppv->y[ppv->index_pt_l3_g] = ppw->pv->y[ppw->pv->index_pt_l3_g]; for (l = 4; l <= ppw->pv->l_max_g; l++) { ppv->y[ppv->index_pt_delta_g+l] = ppw->pv->y[ppw->pv->index_pt_delta_g+l]; } ppv->y[ppv->index_pt_pol0_g] = ppw->pv->y[ppw->pv->index_pt_pol0_g]; ppv->y[ppv->index_pt_pol1_g] = ppw->pv->y[ppw->pv->index_pt_pol1_g]; ppv->y[ppv->index_pt_pol2_g] = ppw->pv->y[ppw->pv->index_pt_pol2_g]; ppv->y[ppv->index_pt_pol3_g] = ppw->pv->y[ppw->pv->index_pt_pol3_g]; for (l = 4; l <= ppw->pv->l_max_pol_g; l++) { ppv->y[ppv->index_pt_pol0_g+l] = ppw->pv->y[ppw->pv->index_pt_pol0_g+l]; } } if (pba->has_ur == _TRUE_) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { ppv->y[ppv->index_pt_delta_ur] = ppw->pv->y[ppw->pv->index_pt_delta_ur]; ppv->y[ppv->index_pt_theta_ur] = ppw->pv->y[ppw->pv->index_pt_theta_ur]; ppv->y[ppv->index_pt_shear_ur] = ppw->pv->y[ppw->pv->index_pt_shear_ur]; if (ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) { ppv->y[ppv->index_pt_l3_ur] = ppw->pv->y[ppw->pv->index_pt_l3_ur]; for (l=4; l <= ppv->l_max_ur; l++) ppv->y[ppv->index_pt_delta_ur+l] = ppw->pv->y[ppw->pv->index_pt_delta_ur+l]; } } } a = ppw->pvecback[pba->index_bg_a]; index_pt = ppw->pv->index_pt_psi0_ncdm1; for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){ // We are in the fluid approximation, so ncdm_l_size is always 3. ncdm_l_size = ppv->l_max_ncdm[n_ncdm]+1; rho_plus_p_ncdm = ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]; for(l=0; l<=2; l++){ ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+l] = 0.0; } factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4); for(index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q++){ // Integrate over distributions: q = pba->q_ncdm[n_ncdm][index_q]; q2 = q*q; epsilon = sqrt(q2+a*a*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]); ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm] += pba->w_ncdm[n_ncdm][index_q]*q2*epsilon* ppw->pv->y[index_pt]; ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+1] += pba->w_ncdm[n_ncdm][index_q]*q2*q* ppw->pv->y[index_pt+1]; ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+2] += pba->w_ncdm[n_ncdm][index_q]*q2*q2/epsilon* ppw->pv->y[index_pt+2]; //Jump to next momentum bin in ppw->pv->y: index_pt += (ppw->pv->l_max_ncdm[n_ncdm]+1); } ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm] *=factor/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+1] *=k*factor/rho_plus_p_ncdm; ppv->y[ppv->index_pt_psi0_ncdm1+ncdm_l_size*n_ncdm+2] *=2.0/3.0*factor/rho_plus_p_ncdm; } } } } /** - --> (b) for the vector mode */ if (_vectors_) { /** - ---> (b.1.) 
check that the change of approximation scheme makes sense (note: before calling this routine there is already a check that we wish to change only one approximation flag at a time) */ class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on), ppt->error_message, "at tau=%g: the tight-coupling approximation can be switched off, not on",tau); /** - ---> (b.2.) some variables (gw, gwdot, ...) are not affected by any approximation. They need to be reconducted whatever the approximation switching is. We treat them here. Below we will treat other variables case by case. */ if (ppt->gauge == synchronous){ ppv->y[ppv->index_pt_hv_prime] = ppw->pv->y[ppw->pv->index_pt_hv_prime]; } if (ppt->gauge == newtonian){ ppv->y[ppv->index_pt_V] = ppw->pv->y[ppw->pv->index_pt_V]; } ppv->y[ppv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_b]; /* -- case of switching off tight coupling approximation. Provide correct initial conditions to new set of variables */ if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau); ppv->y[ppv->index_pt_delta_g] = 0.0; //TBC //-4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; ppv->y[ppv->index_pt_pol0_g] = 0.0; //TBC //1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; } /* -- case of switching on radiation streaming approximation. Provide correct initial conditions to new set of variables */ if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]); } } /** - --> (c) for the tensor mode */ if (_tensors_) { /** - ---> (c.1.) check that the change of approximation scheme makes sense (note: before calling this routine there is already a check that we wish to change only one approximation flag at a time) */ class_test((pa_old[ppw->index_ap_tca] == (int)tca_off) && (ppw->approx[ppw->index_ap_tca] == (int)tca_on), ppt->error_message, "at tau=%g: the tight-coupling approximation can be switched off, not on",tau); /** - ---> (c.2.) some variables (gw, gwdot, ...) are not affected by any approximation. They need to be reconducted whatever the approximation switching is. We treat them here. Below we will treat other variables case by case. */ ppv->y[ppv->index_pt_gw] = ppw->pv->y[ppw->pv->index_pt_gw]; ppv->y[ppv->index_pt_gwdot] = ppw->pv->y[ppw->pv->index_pt_gwdot]; if (ppt->evolve_tensor_ur == _TRUE_){ /* For now, neutrinos go here. */ ppv->y[ppv->index_pt_delta_ur] = ppw->pv->y[ppw->pv->index_pt_delta_ur]; ppv->y[ppv->index_pt_theta_ur] = ppw->pv->y[ppw->pv->index_pt_theta_ur]; ppv->y[ppv->index_pt_shear_ur] = ppw->pv->y[ppw->pv->index_pt_shear_ur]; ppv->y[ppv->index_pt_l3_ur] = ppw->pv->y[ppw->pv->index_pt_l3_ur]; for (l=4; l <= ppv->l_max_ur; l++) ppv->y[ppv->index_pt_delta_ur+l] = ppw->pv->y[ppw->pv->index_pt_delta_ur+l]; } if (ppt->evolve_tensor_ncdm == _TRUE_){ index_pt = 0; for(n_ncdm = 0; n_ncdm < ppv->N_ncdm; n_ncdm++){ for(index_q=0; index_q < ppv->q_size_ncdm[n_ncdm]; index_q++){ for(l=0; l<=ppv->l_max_ncdm[n_ncdm];l++){ // This is correct with or without ncdmfa, since ppv->lmax_ncdm is set accordingly. 
ppv->y[ppv->index_pt_psi0_ncdm1+index_pt] = ppw->pv->y[ppw->pv->index_pt_psi0_ncdm1+index_pt]; index_pt++; } } } } /* -- case of switching off tight coupling approximation. Provide correct initial conditions to new set of variables */ if ((pa_old[ppw->index_ap_tca] == (int)tca_on) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch off tight-coupling approximation at tau=%e\n",k,tau); ppv->y[ppv->index_pt_delta_g] = -4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; ppv->y[ppv->index_pt_pol0_g] = 1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; } /* -- case of switching on radiation streaming approximation. Provide correct initial conditions to new set of variables */ if ((pa_old[ppw->index_ap_rsa] == (int)rsa_off) && (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on)) { if (ppt->perturbations_verbose>2) fprintf(stdout,"Mode k=%e: switch on radiation streaming approximation at tau=%e with Omega_r=%g\n",k,tau,ppw->pvecback[pba->index_bg_Omega_r]); } } /** - --> (d) free the previous vector of perturbations */ class_call(perturb_vector_free(ppw->pv), ppt->error_message, ppt->error_message); /** - --> (e) let ppw-->pv points towards the perturb_vector structure that we just created */ ppw->pv = ppv; } return _SUCCESS_; } /** * Free the perturb_vector structure. * * @param pv Input: pointer to perturb_vector structure to be freed * @return the error status */ int perturb_vector_free( struct perturb_vector * pv ) { if (pv->l_max_ncdm != NULL) free(pv->l_max_ncdm); if (pv->q_size_ncdm != NULL) free(pv->q_size_ncdm); free(pv->y); free(pv->dy); free(pv->used_in_sources); free(pv); return _SUCCESS_; } /** * For each mode, wavenumber and initial condition, this function * initializes in the vector all values of perturbed variables (in a * given gauge). It is assumed here that all values have previously been * set to zero, only non-zero values are set here. * * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param index_ic Input: index of initial condition under consideration (ad, iso...) * @param k Input: wavenumber * @param tau Input: conformal time * @param ppw Input/Output: workspace containing in input the approximation scheme, the background/thermodynamics/metric quantities, and eventually the previous vector y; and in output the new vector y. * @return the error status */ int perturb_initial_conditions(struct precision * ppr, struct background * pba, struct perturbs * ppt, int index_md, int index_ic, double k, double tau, struct perturb_workspace * ppw ) { /** Summary: */ /** --> Declare local variables */ double a,a_prime_over_a; double delta_ur=0.,theta_ur=0.,shear_ur=0.,l3_ur=0.,eta=0.,delta_cdm=0.,alpha, alpha_prime; double delta_dr=0; double q,epsilon,k2; int index_q,n_ncdm,idx; double rho_r,rho_m,rho_nu,rho_m_over_rho_r; double fracnu,fracg,fracb,fraccdm,om; double ktau_two,ktau_three; double f_dr; double delta_tot; double velocity_tot; double s2_squared; /** Auxiliar variables for scalar field */ double theta_phi, y1_phi; if (_scalars_) { /** - (a) compute relevant background quantities: compute rho_r, rho_m, rho_nu (= all relativistic except photons), and their ratio. 
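        These quantities enter the adiabatic and isocurvature initial conditions
        below mainly through the fractions fracg, fracnu, fracb, fraccdm and the
        parameter om = a rho_m / sqrt(rho_r) defined just below.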
*/ class_call(background_at_tau(pba, tau, pba->normal_info, pba->inter_normal, &(ppw->last_index_back), ppw->pvecback), pba->error_message, ppt->error_message); a = ppw->pvecback[pba->index_bg_a]; a_prime_over_a = ppw->pvecback[pba->index_bg_H]*a; /* 8piG/3 rho_r(t_i) */ rho_r = ppw->pvecback[pba->index_bg_rho_g]; /* 8piG/3 rho_m(t_i) */ rho_m = ppw->pvecback[pba->index_bg_rho_b]; /* 8piG/3 rho_nu(t_i) (all neutrinos and collisionless relics being relativistic at that time) */ rho_nu = 0.; if (pba->has_cdm == _TRUE_) { rho_m += ppw->pvecback[pba->index_bg_rho_cdm]; } if (pba->has_dcdm == _TRUE_) { rho_m += ppw->pvecback[pba->index_bg_rho_dcdm]; } /* Include scalar field as part of the matter budget always. */ /*if (pba->has_scf == _TRUE_) { rho_m += ppw->pvecback[pba->index_bg_rho_scf]; }*/ if (pba->has_dr == _TRUE_) { rho_r += ppw->pvecback[pba->index_bg_rho_dr]; rho_nu += ppw->pvecback[pba->index_bg_rho_dr]; } if (pba->has_ur == _TRUE_) { rho_r += ppw->pvecback[pba->index_bg_rho_ur]; rho_nu += ppw->pvecback[pba->index_bg_rho_ur]; } if (pba->has_ncdm == _TRUE_) { for(n_ncdm=0; n_ncdm<pba->N_ncdm; n_ncdm++){ rho_r += ppw->pvecback[pba->index_bg_rho_ncdm1 + n_ncdm]; rho_nu += ppw->pvecback[pba->index_bg_rho_ncdm1 + n_ncdm]; } } class_test(rho_r == 0., ppt->error_message, "stop to avoid division by zero"); /* f_nu = Omega_nu(t_i) / Omega_r(t_i) */ fracnu = rho_nu/rho_r; /* f_g = Omega_g(t_i) / Omega_r(t_i) */ fracg = ppw->pvecback[pba->index_bg_rho_g]/rho_r; /* f_b = Omega_b(t_i) / Omega_m(t_i) */ fracb = ppw->pvecback[pba->index_bg_rho_b]/rho_m; /* f_cdm = Omega_cdm(t_i) / Omega_m(t_i) */ fraccdm = 1.-fracb; /* Omega_m(t_i) / Omega_r(t_i) */ rho_m_over_rho_r = rho_m/rho_r; /* omega = Omega_m(t_i) a(t_i) H(t_i) / sqrt(Omega_r(t_i)) = Omega_m(t_0) a(t_0) H(t_0) / sqrt(Omega_r(t_0)) assuming rho_m in a-3 and rho_r in a^-4 = (8piG/3 rho_m(t_i)) a(t_i) / sqrt(8piG/3 rho_r(t_i)) in Mpc-1 This (a priori strange) parameter is the relevant one for expressing a as a function of tau during radiation and matter domination (but not DE domination). Indeed the exact solution of Friedmann when there is only radiation and matter in the universe is a = [H(t_0)^2 Omega_m(t_0) a(t_0)^3 / 4] x [tau^2 + 4 tau / omega] */ om = a*rho_m/sqrt(rho_r); /* (k tau)^2, (k tau)^3 */ ktau_two=k*k*tau*tau; ktau_three=k*tau*ktau_two; /* curvature-dependent factors */ s2_squared = 1.-3.*pba->K/k/k; /** - (b) starts by setting everything in synchronous gauge. If another gauge is needed, we will perform a gauge transformation below. */ /** - --> (b.1.) adiabatic */ if ((ppt->has_ad == _TRUE_) && (index_ic == ppt->index_ic_ad)) { /* The following formulas are valid at leading order in (k*tau) and (om*tau), and order zero in tight-coupling. Identical to first order terms in CRS, except for normalization (when ppr->curvature_ini=1, tau=1: leads to factor 1/2 difference between CRS formulas with beta1=0). Identical to CAMB when om set to zero in theta_g, theta_ur, shear_ur, tau In the non-flat case the relation R=eta is still valid outside the horizon for adiabatic IC. Hence eta is still set to ppr->curvature_ini at leading order. Factors s2 appear through the solution of Einstein equations and equations of motion. */ /* photon density */ ppw->pv->y[ppw->pv->index_pt_delta_g] = - ktau_two/3. * (1.-om*tau/5.) * ppr->curvature_ini * s2_squared; /* photon velocity */ ppw->pv->y[ppw->pv->index_pt_theta_g] = - k*ktau_three/36. 
* (1.-3.*(1.+5.*fracb-fracnu)/20./(1.-fracnu)*om*tau) * ppr->curvature_ini * s2_squared; /* tighly-coupled baryons */ ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* baryon density */ ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; /* baryon velocity */ if (pba->has_cdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_cdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* cdm density */ /* cdm velocity vanishes in the synchronous gauge */ } if (pba->has_dcdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_dcdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* dcdm density */ /* dcdm velocity velocity vanishes initially in the synchronous gauge */ } /* fluid (assumes wa=0, if this is not the case the fluid will catch anyway the attractor solution) */ if (pba->has_fld == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_fld] = - ktau_two/4.*(1.+pba->w0_fld+pba->wa_fld)*(4.-3.*pba->cs2_fld)/(4.-6.*(pba->w0_fld+pba->wa_fld)+3.*pba->cs2_fld) * ppr->curvature_ini * s2_squared; /* from 1004.5509 */ //TBC: curvature ppw->pv->y[ppw->pv->index_pt_theta_fld] = - k*ktau_three/4.*pba->cs2_fld/(4.-6.*(pba->w0_fld+pba->wa_fld)+3.*pba->cs2_fld) * ppr->curvature_ini * s2_squared; /* from 1004.5509 */ //TBC:curvature } if (pba->has_scf == _TRUE_) { /** Scalar field variables from the background */ theta_phi = ppw->pvecback[pba->index_bg_theta_phi_scf]; y1_phi = ppw->pvecback[pba->index_bg_y_phi_scf]; /** Initial conditions in new formalism. Notice that by definition: omega=k^2/(H^2 y1) */ ppw->pv->y[ppw->pv->index_pt_omega_scf] = k*k/(pow(a*ppw->pvecback[pba->index_bg_H],2.)*y1_phi); /** Canonical field (solving for the perturbations): Attractor solution around the critical point of the perturbation equations */ ppw->pv->y[ppw->pv->index_pt_delta0_scf] = (3./7.)*ppw->pv->y[ppw->pv->index_pt_delta_g]*sin(0.5*theta_phi)*sin(theta_phi/12.); ppw->pv->y[ppw->pv->index_pt_delta1_scf] = (3./7.)*ppw->pv->y[ppw->pv->index_pt_delta_g]*sin(0.5*theta_phi)*cos(theta_phi/12.); } /* all relativistic relics: ur, early ncdm, dr */ if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_) || (pba->has_dr == _TRUE_)) { delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g]; /* density of ultra-relativistic neutrinos/relics */ theta_ur = - k*ktau_three/36./(4.*fracnu+15.) * (4.*fracnu+11.+12.*s2_squared-3.*(8.*fracnu*fracnu+50.*fracnu+275.)/20./(2.*fracnu+15.)*tau*om) * ppr->curvature_ini * s2_squared; /* velocity of ultra-relativistic neutrinos/relics */ //TBC shear_ur = ktau_two/(45.+12.*fracnu) * (3.*s2_squared-1.) * (1.+(4.*fracnu-5.)/4./(2.*fracnu+15.)*tau*om) * ppr->curvature_ini;//TBC /s2_squared; /* shear of ultra-relativistic neutrinos/relics */ //TBC:0 l3_ur = ktau_three*2./7./(12.*fracnu+45.)* ppr->curvature_ini;//TBC if (pba->has_dr == _TRUE_) delta_dr = delta_ur; } /* synchronous metric perturbation eta */ //eta = ppr->curvature_ini * (1.-ktau_two/12./(15.+4.*fracnu)*(5.+4.*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om)) / s2_squared; //eta = ppr->curvature_ini * s2_squared * (1.-ktau_two/12./(15.+4.*fracnu)*(15.*s2_squared-10.+4.*s2_squared*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om)); eta = ppr->curvature_ini * (1.-ktau_two/12./(15.+4.*fracnu)*(5.+4.*s2_squared*fracnu - (16.*fracnu*fracnu+280.*fracnu+325)/10./(2.*fracnu+15.)*tau*om)); } /* isocurvature initial conditions taken from Bucher, Moodely, Turok 99, with just a different normalization convention for tau and the scale factor. 
[k tau] from BMT99 is left invariant because it is the ratio [k/aH]. But [Omega_i,0 tau] from BMT99 must be replaced by [frac_i*om*tau/4]. Some doubts remain about the niv formulas, that should be recheked at some point. We also checked that for bi,cdi,nid, everything coincides exactly with the CAMB formulas. */ /** - --> (b.2.) Cold dark matter Isocurvature */ if ((ppt->has_cdi == _TRUE_) && (index_ic == ppt->index_ic_cdi)) { class_test(pba->has_cdm == _FALSE_, ppt->error_message, "not consistent to ask for CDI in absence of CDM!"); ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fraccdm*om*tau*(-2./3.+om*tau/4.); ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fraccdm*om*ktau_two/12.; ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; ppw->pv->y[ppw->pv->index_pt_delta_cdm] = ppr->entropy_ini+3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_)) { delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g]; theta_ur = ppw->pv->y[ppw->pv->index_pt_theta_g]; shear_ur = -ppr->entropy_ini*fraccdm*ktau_two*tau*om/6./(2.*fracnu+15.); } eta = -ppr->entropy_ini*fraccdm*om*tau*(1./6.-om*tau/16.); } /** - --> (b.3.) Baryon Isocurvature */ if ((ppt->has_bi == _TRUE_) && (index_ic == ppt->index_ic_bi)) { ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fracb*om*tau*(-2./3.+om*tau/4.); ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fracb*om*ktau_two/12.; ppw->pv->y[ppw->pv->index_pt_delta_b] = ppr->entropy_ini+3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; if (pba->has_cdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_cdm] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; } if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_)) { delta_ur = ppw->pv->y[ppw->pv->index_pt_delta_g]; theta_ur = ppw->pv->y[ppw->pv->index_pt_theta_g]; shear_ur = -ppr->entropy_ini*fracb*ktau_two*tau*om/6./(2.*fracnu+15.); } eta = -ppr->entropy_ini*fracb*om*tau*(1./6.-om*tau/16.); } /** - --> (b.4.) Neutrino density Isocurvature */ if ((ppt->has_nid == _TRUE_) && (index_ic == ppt->index_ic_nid)) { class_test((pba->has_ur == _FALSE_) && (pba->has_ncdm == _FALSE_), ppt->error_message, "not consistent to ask for NID in absence of ur or ncdm species!"); ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*fracnu/fracg*(-1.+ktau_two/6.); ppw->pv->y[ppw->pv->index_pt_theta_g] = -ppr->entropy_ini*fracnu/fracg*k*k*tau*(1./4.-fracb/fracg*3./16.*om*tau); ppw->pv->y[ppw->pv->index_pt_delta_b] = ppr->entropy_ini*fracnu/fracg/8.*ktau_two; ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; if (pba->has_cdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_cdm] = -ppr->entropy_ini*fracnu*fracb/fracg/80.*ktau_two*om*tau; } delta_ur = ppr->entropy_ini*(1.-ktau_two/6.); theta_ur = ppr->entropy_ini*k*k*tau/4.; shear_ur = ppr->entropy_ini*ktau_two/(4.*fracnu+15.)/2.; eta = -ppr->entropy_ini*fracnu/(4.*fracnu+15.)/6.*ktau_two; } /** - --> (b.5.) Neutrino velocity Isocurvature */ if ((ppt->has_niv == _TRUE_) && (index_ic == ppt->index_ic_niv)) { class_test((pba->has_ur == _FALSE_) && (pba->has_ncdm == _FALSE_), ppt->error_message, "not consistent to ask for NIV in absence of ur or ncdm species!"); ppw->pv->y[ppw->pv->index_pt_delta_g] = ppr->entropy_ini*k*tau*fracnu/fracg* (1. 
- 3./16.*fracb*(2.+fracg)/fracg*om*tau); /* small diff wrt camb */ ppw->pv->y[ppw->pv->index_pt_theta_g] = ppr->entropy_ini*fracnu/fracg*3./4.*k* (-1.+3./4.*fracb/fracg*om*tau+3./16.*om*om*tau*tau*fracb/fracg/fracg*(fracg-3.*fracb)+ktau_two/6.); ppw->pv->y[ppw->pv->index_pt_delta_b] = 3./4.*ppw->pv->y[ppw->pv->index_pt_delta_g]; /* small diff wrt camb */ ppw->pv->y[ppw->pv->index_pt_theta_b] = ppw->pv->y[ppw->pv->index_pt_theta_g]; if (pba->has_cdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_cdm] = -ppr->entropy_ini*9./64.*fracnu*fracb/fracg*k*tau*om*tau; } delta_ur = -ppr->entropy_ini*k*tau*(1.+3./16.*fracb*fracnu/fracg*om*tau); /* small diff wrt camb */ theta_ur = ppr->entropy_ini*3./4.*k*(1. - 1./6.*ktau_two*(4.*fracnu+9.)/(4.*fracnu+5.)); shear_ur = ppr->entropy_ini/(4.*fracnu+15.)*k*tau*(1. + 3.*om*tau*fracnu/(4.*fracnu+15.)); /* small diff wrt camb */ eta = ppr->entropy_ini*fracnu*k*tau*(-1./(4.*fracnu+5.) + (-3./64.*fracb/fracg+15./4./(4.*fracnu+15.)/(4.*fracnu+5.)*om*tau)); /* small diff wrt camb */ } /** - (c) If the needed gauge is really the synchronous gauge, we need to affect the previously computed value of eta to the actual variable eta */ if (ppt->gauge == synchronous) { ppw->pv->y[ppw->pv->index_pt_eta] = eta; } /** - (d) If the needed gauge is the newtonian gauge, we must compute alpha and then perform a gauge transformation for each variable */ if (ppt->gauge == newtonian) { /* alpha is like in Ma & Bertschinger: (h'+6 eta')/(2k^2). We obtain it from the first two Einstein equations: alpha = [eta + 3/2 (a'/a)^2 (delta_rho/rho_c) / k^2 /s_2^2 + 3/2 (a'/a)^3 3 ((rho+p)theta/rho_c) / k^4 / s_2^2] / (a'/a) = [eta + 3/2 (a'/a)^2 / k^2 /s_2^2 {delta_tot + 3 (a'/a) /k^2 velocity_tot}] / (a'/a) with delta_tot = (delta_rho/rho_c) = [rho_r delta_r + rho_m delta_m] / (rho_r + rho_m) = [delta_r + (rho_m/rho_r) delta_m] / (1 + rho_m/rho_r) = [(f_g delta_g + f_nu delta_nu) + (rho_m/rho_r) (f_b delta_b + f_cdm delta_cdm)] / (1 + rho_m/rho_r) velocity_tot = ((rho+p)theta/rho_c) = [(4/3) rho_r theta_r + rho_m theta_m] / (rho_r + rho_m) = [(4/3) theta_r + (rho_m/rho_r) theta_m] / (1 + rho_m/rho_r) = [(4/3) (f_g theta_g + f_nu theta_nu) + (rho_m/rho_r) (f_b delta_b + f_cdm 0)] / (1 + rho_m/rho_r) */ if (pba->has_cdm == _TRUE_) delta_cdm = ppw->pv->y[ppw->pv->index_pt_delta_cdm]; else if (pba->has_dcdm == _TRUE_) delta_cdm = ppw->pv->y[ppw->pv->index_pt_delta_dcdm]; else delta_cdm=0.; // note: if there are no neutrinos, fracnu, delta_ur and theta_ur below will consistently be zero. 
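      /* gauge transformation of the individual species applied below: each density
         contrast is shifted by a term proportional to (a'/a)*alpha (with an extra
         decay term for dcdm and dr), each velocity by k^2*alpha, while the shears
         and l>=3 multipoles are gauge invariant */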
delta_tot = (fracg*ppw->pv->y[ppw->pv->index_pt_delta_g]+fracnu*delta_ur+rho_m_over_rho_r*(fracb*ppw->pv->y[ppw->pv->index_pt_delta_b]+fraccdm*delta_cdm))/(1.+rho_m_over_rho_r); velocity_tot = ((4./3.)*(fracg*ppw->pv->y[ppw->pv->index_pt_theta_g]+fracnu*theta_ur) + rho_m_over_rho_r*fracb*ppw->pv->y[ppw->pv->index_pt_theta_b])/(1.+rho_m_over_rho_r); alpha = (eta + 3./2.*a_prime_over_a*a_prime_over_a/k/k/s2_squared*(delta_tot + 3.*a_prime_over_a/k/k*velocity_tot))/a_prime_over_a; ppw->pv->y[ppw->pv->index_pt_phi] = eta - a_prime_over_a*alpha; ppw->pv->y[ppw->pv->index_pt_delta_g] -= 4.*a_prime_over_a*alpha; ppw->pv->y[ppw->pv->index_pt_theta_g] += k*k*alpha; ppw->pv->y[ppw->pv->index_pt_delta_b] -= 3.*a_prime_over_a*alpha; ppw->pv->y[ppw->pv->index_pt_theta_b] += k*k*alpha; if (pba->has_cdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_cdm] -= 3.*a_prime_over_a*alpha; ppw->pv->y[ppw->pv->index_pt_theta_cdm] = k*k*alpha; } if (pba->has_dcdm == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_dcdm] += (-3.*a_prime_over_a - a*pba->Gamma_dcdm)*alpha; ppw->pv->y[ppw->pv->index_pt_theta_dcdm] = k*k*alpha; } /* fluid */ if (pba->has_fld == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_fld] += 3*(1.+pba->w0_fld+pba->wa_fld)*a_prime_over_a*alpha; ppw->pv->y[ppw->pv->index_pt_theta_fld] += k*k*alpha; } /* scalar field: check */ if (pba->has_scf == _TRUE_) { alpha_prime = 0.0; /* - 2. * a_prime_over_a * alpha + eta - 4.5 * (a2/k2) * ppw->rho_plus_p_shear; */ //ppw->pv->y[ppw->pv->index_pt_phi_scf] += 0.0; //alpha*ppw->pvecback[pba->index_bg_phi_prime_scf]; //ppw->pv->y[ppw->pv->index_pt_phi_prime_scf] += 0.0; /* (-2.*a_prime_over_a*alpha*ppw->pvecback[pba->index_bg_phi_prime_scf] -a*a* dV_scf(pba,ppw->pvecback[pba->index_bg_phi_scf])*alpha +ppw->pvecback[pba->index_bg_phi_prime_scf]*alpha_prime); */ } if ((pba->has_ur == _TRUE_) || (pba->has_ncdm == _TRUE_) || (pba->has_dr == _TRUE_)) { delta_ur -= 4.*a_prime_over_a*alpha; theta_ur += k*k*alpha; /* shear and l3 are gauge invariant */ if (pba->has_dr == _TRUE_) delta_dr += (-4.*a_prime_over_a + a*pba->Gamma_dcdm*ppw->pvecback[pba->index_bg_rho_dcdm]/ppw->pvecback[pba->index_bg_rho_dr])*alpha; } } /* end of gauge transformation to newtonian gauge */ /** - (e) In any gauge, we should now implement the relativistic initial conditions in ur and ncdm variables */ if (pba->has_ur == _TRUE_) { ppw->pv->y[ppw->pv->index_pt_delta_ur] = delta_ur; ppw->pv->y[ppw->pv->index_pt_theta_ur] = theta_ur; ppw->pv->y[ppw->pv->index_pt_shear_ur] = shear_ur; ppw->pv->y[ppw->pv->index_pt_l3_ur] = l3_ur; } if (pba->has_ncdm == _TRUE_) { idx = ppw->pv->index_pt_psi0_ncdm1; for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q++) { q = pba->q_ncdm[n_ncdm][index_q]; epsilon = sqrt(q*q+a*a*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]); ppw->pv->y[idx] = -0.25 * delta_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q]; ppw->pv->y[idx+1] = -epsilon/3./q/k*theta_ur* pba->dlnf0_dlnq_ncdm[n_ncdm][index_q]; ppw->pv->y[idx+2] = -0.5 * shear_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q]; ppw->pv->y[idx+3] = -0.25 * l3_ur * pba->dlnf0_dlnq_ncdm[n_ncdm][index_q]; //Jump to next momentum bin: idx += (ppw->pv->l_max_ncdm[n_ncdm]+1); } } } if (pba->has_dr == _TRUE_) { f_dr = pow(pow(a/pba->a_today,2)/pba->H0,2)*ppw->pvecback[pba->index_bg_rho_dr]; ppw->pv->y[ppw->pv->index_pt_F0_dr] = delta_dr*f_dr; ppw->pv->y[ppw->pv->index_pt_F0_dr+1] = 4./(3.*k)*theta_ur*f_dr; ppw->pv->y[ppw->pv->index_pt_F0_dr+2] = 2.*shear_ur*f_dr; 
ppw->pv->y[ppw->pv->index_pt_F0_dr+3] = l3_ur*f_dr; } } /** --> For tensors */ if (_tensors_) { /** tensor initial conditions take into account the fact that scalar (resp. tensor) \f$ C_l\f$'s are related to the real space power spectrum of curvature (resp. of the tensor part of metric perturbations) \f[ <R(x) R(x)> \ \ \sum_{ij} <h_{ij}(x) h^{ij}(x)> \f] In momentum space it is conventional to use the modes R(k) and h(k) where the quantity h obeying to the equation of propagation: \f[ h'' + \frac{2a'}{a} h + [k2+2K] h = 12\pi Ga2 (\rho+p) \sigma = 8\pi Ga2 p \pi \f] and the power spectra in real space and momentum space are related through: \f[ <R(x) R(x)> = \int \frac{dk}{k} \left[ \frac{k^3}{2\pi^2} <R(k)R(k)^*>\right] = \int \frac{dk}{k} \mathcal{P}_R(k) \f] \f[\sum_{ij} <h_{ij}(x) h^{ij}(x)> = \frac{dk}{k} \left[ \frac{k^3}{2\pi^2} F\left(\frac{k^2}{K}\right) <h(k)h(k)^*>\right] = \int \frac{dk}{k} F\left(\frac{k^2}{K}\right) \mathcal{P}_h(k) \f] where \f$ \mathcal{P}_R\f$ and \f$ \mathcal{P}_h\f$ are the dimensionless spectrum of curvature R, and F is a function of k2/K, where K is the curvature parameter. F is equal to one in flat space (K=0), and coming from the contraction of the laplacian eigentensor \f$ Q_{ij}\f$ with itself. We will give F explicitly below. Similarly the scalar (S) and tensor (T) \f$ C_l\f$'s are given by \f[ C_l^S = 4\pi \int \frac{dk}{k} [\Delta_l^S(q)]^2 \mathcal{P}_R(k) \f] \f[ C_l^T = 4\pi \int \frac{dk}{k} [\Delta_l^T(q)]^2 F\left(\frac{k^2}{K}\right) \mathcal{P}_h(k) \f] The usual convention for the tensor-to-scalar ratio \f$ r = A_t / A_s \f$ at pivot scale = 16 epsilon in single-field inflation is such that for constant \f$ \mathcal{P}_R(k)\f$ and \f$ \mathcal{P}_h(k)\f$, \f[ r = 6 \frac{\mathcal{P}_h(k)}{\mathcal{P}_R(k)} \f] so \f[ \mathcal{P}_h(k) = \frac{\mathcal{P}_R(k) r}{6} = \frac{A_s r}{6} = \frac{A_t}{6} \f] A priori it would make sense to say that for a power-law primordial spectrum there is an extra factor \f$ (k/k_{pivot})^{n_t} \f$ (and eventually running and so on and so forth...) However it has been shown that the minimal models of inflation in a negatively curved bubble lead to \f$ \mathcal{P}_h(k)=\tanh(\pi*\nu/2)\f$. In open models it is customary to define the tensor tilt in a non-flat universe as a deviation from this behavior rather than from true scale-invariance in the above sense. Hence we should have \f[ \mathcal{P}_h(k) = \frac{A_t}{6} [ \tanh(\pi*\frac{\nu}{2})] (k/k_{pivot})^{(n_t+...)}\f] where the brackets \f[ [...] \f] mean "if K<0" Then \f[ C_l^T = 4\pi \int \frac{dk}{k} [\Delta_l^T(q)]^2 F\left(\frac{k^2}{K}\right) \frac{A_t}{6} [\tanh(\pi*\frac{\nu}{2})] (k/k_{pivot})^{(n_t+...)} \f] In the code, it is then a matter of choice to write: - In the primordial module: \f$ \mathcal{P}_h(k) = \frac{A_t}{6} \tanh{(\pi*\frac{\nu}{2})} (k/k^*)^{n_T}\f$ - In the perturbation initial conditions: \f$ h = 1\f$ - In the spectra module: \f$ C_l^T = \frac{4}{\pi} \int \frac{dk}{k} [\Delta_l^T(q)]^2 F\left(\frac{k^2}{K}\right) \mathcal{P}_h(k) \f$ or: - In the primordial module: \f$ \mathcal{P}_h(k) = A_t (k/k^*)^{n_T} \f$ - In the perturbation initial conditions: \f$ h = \sqrt{[F\left(\frac{k^2}{K}\right) / 6] \tanh{(\pi*\frac{\nu}{2})}} \f$ - In the spectra module: \f$ C_l^T = \frac{4}{\pi} \int \frac{dk}{k} [\Delta_l^T(q)]^2 \mathcal{P}_h(k) \f$ We choose this last option, such that the primordial and spectra module differ minimally in flat and non-flat space. 
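      For instance, in the flat case (K=0) the factor F is equal to one and there is
      no tanh correction, so this choice amounts to the simple normalisation
      h = 1/sqrt(6) (times gw_ini), which is what the code below imposes through
      gw_ini/_SQRT6_.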
Then we must impose \f[ h = \sqrt{\left(\frac{F}{6}\right) \tanh{(\pi*\frac{\nu}{2})}} \f] The factor F is found to be given by: \f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{dk}{k} \frac{k2(k2-K)}{(k2+3K)(k2+2K)} \mathcal{P}_h(k) \f] Introducing as usual \f$ q2 = k2 - 3K \f$ and using qdq = kdk this gives \f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{dk}{k} \frac{(q2-3K)(q2-4K)}{q2(q2-K)} \mathcal{P}_h(k) \f] Using qdq = kdk this is equivalent to \f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{dq}{q} \frac{q2-4K}{q2-K} \mathcal{P}_h(k(q)) \f] Finally, introducing \f$ \nu=q/\sqrt{|K|}\f$ and sgnK=SIGN(k)\f$=\pm 1\f$, this could also be written \f[ \sum_{ij}<h_{ij}(x) h^{ij}(x)> = \int \frac{d\nu}{\nu} \frac{(\nu2-4sgnK)}{(\nu2-sgnK)} \mathcal{P}_h(k(\nu)) \f] Equation (43,44) of Hu, Seljak, White, Zaldarriaga is equivalent to absorbing the above factor \f$ (\nu2-4sgnK)/(\nu2-sgnK)\f$ in the definition of the primordial spectrum. Since the initial condition should be written in terms of k rather than nu, they should read \f[ h = \sqrt{ [k2(k2-K)]/[(k2+3K)(k2+2K)] / 6 * \tanh{(\pi*\frac{\nu}{2})} } \f] We leave the freedom to multiply by an arbitrary number ppr->gw_ini. The standard convention corresponding to standard definitions of r, \f$ A_T\f$, \f$ n_T\f$ is however ppr->gw_ini=1. * */ if (index_ic == ppt->index_ic_ten) { ppw->pv->y[ppw->pv->index_pt_gw] = ppr->gw_ini/_SQRT6_; } k2 = k*k; if (pba->sgnK != 0) { ppw->pv->y[ppw->pv->index_pt_gw] *= sqrt(k2*(k2-pba->K)/(k2+3.*pba->K)/(k2+2.*pba->K)); } if (pba->sgnK == -1) { if (k*k+3*pba->K >= 0.) { ppw->pv->y[ppw->pv->index_pt_gw] *= sqrt(tanh(_PI_/2.*sqrt(k2+3*pba->K)/sqrt(-pba->K))); } else { ppw->pv->y[ppw->pv->index_pt_gw] = 0.; } } } return _SUCCESS_; } /** * Evaluate background/thermodynamics at \f$ \tau \f$, infer useful flags / time scales for integrating perturbations. * * Evaluate background quantities at \f$ \tau \f$, as well as thermodynamics for scalar mode; infer useful flags and time scales for integrating the perturbations: * - check whether tight-coupling approximation is needed. * - check whether radiation (photons, massless neutrinos...) perturbations are needed. * - choose step of integration: step = ppr->perturb_integration_stepsize * min_time_scale, where min_time_scale = smallest time scale involved in the equations. There are three time scales to compare: * -# that of recombination, \f$ \tau_c = 1/\kappa' \f$ * -# Hubble time scale, \f$ \tau_h = a/a' \f$ * -# Fourier mode, \f$ \tau_k = 1/k \f$ * * So, in general, min_time_scale = \f$ \min(\tau_c, \tau_b, \tau_h, \tau_k) \f$. * * However, if \f$ \tau_c \ll \tau_h \f$ and \f$ \tau_c * \ll \tau_k \f$, we can use the tight-coupling regime for photons * and write equations in such way that the time scale \f$ * \tau_c \f$ becomes irrelevant (no effective mass term in \f$ * 1/\tau_c \f$). Then, the smallest * scale in the equations is only \f$ \min(\tau_h, \tau_k) \f$. * In practise, it is sufficient to use only the condition \f$ \tau_c \ll \tau_h \f$. * * Also, if \f$ \rho_{matter} \gg \rho_{radiation} \f$ and \f$ k \gg * aH \f$, we can switch off radiation perturbations (i.e. switch on * the free-streaming approximation) and then the smallest scale is * simply \f$ \tau_h \f$. 
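 * For example, once the tight-coupling approximation is switched on for a
 * sub-Hubble mode (k >> aH), the integration step is set by
 * min(tau_h, tau_k) = tau_k, even though tau_c may be orders of magnitude smaller.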
* * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to thermodynamics structure * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param k Input: wavenumber * @param tau Input: conformal time * @param ppw Input/Output: in output contains the approximation to be used at this time * @return the error status */ int perturb_approximations( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt, int index_md, double k, double tau, struct perturb_workspace * ppw ) { /** Summary: */ /** - define local variables */ /* (a) time scale of Fourier mode, \f$ \tau_k = 1/k \f$ */ double tau_k; /* (b) time scale of expansion, \f$ \tau_h = a/a' \f$ */ double tau_h; /* (c) time scale of recombination, \f$ \tau_{\gamma} = 1/\kappa' \f$ */ double tau_c; /** - compute Fourier mode time scale = \f$ \tau_k = 1/k \f$ */ class_test(k == 0., ppt->error_message, "stop to avoid division by zero"); tau_k = 1./k; /** - evaluate background quantities with background_at_tau() and Hubble time scale \f$ \tau_h = a/a' \f$ */ class_call(background_at_tau(pba,tau, pba->normal_info, ppw->inter_mode, &(ppw->last_index_back), ppw->pvecback), pba->error_message, ppt->error_message); class_test(ppw->pvecback[pba->index_bg_H]*ppw->pvecback[pba->index_bg_a] == 0., ppt->error_message, "aH=0, stop to avoid division by zero"); tau_h = 1./(ppw->pvecback[pba->index_bg_H]*ppw->pvecback[pba->index_bg_a]); /** - for scalar modes: */ if (_scalars_) { /** - --> (a) evaluate thermodynamical quantities with thermodynamics_at_z() */ class_call(thermodynamics_at_z(pba, pth, 1./ppw->pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ ppw->inter_mode, &(ppw->last_index_thermo), ppw->pvecback, ppw->pvecthermo), pth->error_message, ppt->error_message); /** - ---> (b.1.) if \f$ \kappa'=0 \f$, recombination is finished; tight-coupling approximation must be off */ if (ppw->pvecthermo[pth->index_th_dkappa] == 0.) { ppw->approx[ppw->index_ap_tca] = (int)tca_off; } /** - ---> (b.2.) if \f$ \kappa' \neq 0 \f$, recombination is not finished: check tight-coupling approximation */ else { /** - ----> (b.2.a) compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */ tau_c = 1./ppw->pvecthermo[pth->index_th_dkappa]; class_test(tau_c < 0., ppt->error_message, "tau_c = 1/kappa' should always be positive unless there is something wrong in the thermodynamics module. However you have here tau_c=%e at z=%e, conformal time=%e x_e=%e. 
(This could come from the interpolation of a too poorly sampled reionisation history?).\n", tau_c, 1./ppw->pvecback[pba->index_bg_a]-1., tau, ppw->pvecthermo[pth->index_th_xe]); /** - ----> (b.2.b) check whether tight-coupling approximation should be on */ if ((tau_c/tau_h < ppr->tight_coupling_trigger_tau_c_over_tau_h) && (tau_c/tau_k < ppr->tight_coupling_trigger_tau_c_over_tau_k)) { ppw->approx[ppw->index_ap_tca] = (int)tca_on; } else { ppw->approx[ppw->index_ap_tca] = (int)tca_off; } } /** - --> (c) free-streaming approximations */ if ((tau/tau_k > ppr->radiation_streaming_trigger_tau_over_tau_k) && (tau > pth->tau_free_streaming) && (ppr->radiation_streaming_approximation != rsa_none)) { ppw->approx[ppw->index_ap_rsa] = (int)rsa_on; } else { ppw->approx[ppw->index_ap_rsa] = (int)rsa_off; } if (pba->has_ur == _TRUE_) { if ((tau/tau_k > ppr->ur_fluid_trigger_tau_over_tau_k) && (ppr->ur_fluid_approximation != ufa_none)) { ppw->approx[ppw->index_ap_ufa] = (int)ufa_on; } else { ppw->approx[ppw->index_ap_ufa] = (int)ufa_off; } } if (pba->has_ncdm == _TRUE_) { if ((tau/tau_k > ppr->ncdm_fluid_trigger_tau_over_tau_k) && (ppr->ncdm_fluid_approximation != ncdmfa_none)) { ppw->approx[ppw->index_ap_ncdmfa] = (int)ncdmfa_on; } else { ppw->approx[ppw->index_ap_ncdmfa] = (int)ncdmfa_off; } } } /** - for tensor modes: */ if (_tensors_) { /** - --> (a) evaluate thermodynamical quantities with thermodynamics_at_z() */ class_call(thermodynamics_at_z(pba, pth, 1./ppw->pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ ppw->inter_mode, &(ppw->last_index_thermo), ppw->pvecback, ppw->pvecthermo), pth->error_message, ppt->error_message); /** - ---> (b.1.) if \f$ \kappa'=0 \f$, recombination is finished; tight-coupling approximation must be off */ if (ppw->pvecthermo[pth->index_th_dkappa] == 0.) { ppw->approx[ppw->index_ap_tca] = (int)tca_off; } /** - ---> (b.2.) if \f$ \kappa' \neq 0 \f$, recombination is not finished: check tight-coupling approximation */ else { /** - ----> (b.2.a) compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */ tau_c = 1./ppw->pvecthermo[pth->index_th_dkappa]; /** - ----> (b.2.b) check whether tight-coupling approximation should be on */ if ((tau_c/tau_h < ppr->tight_coupling_trigger_tau_c_over_tau_h) && (tau_c/tau_k < ppr->tight_coupling_trigger_tau_c_over_tau_k)) { ppw->approx[ppw->index_ap_tca] = (int)tca_on; } else { ppw->approx[ppw->index_ap_tca] = (int)tca_off; } } if ((tau/tau_k > ppr->radiation_streaming_trigger_tau_over_tau_k) && (tau > pth->tau_free_streaming) && (ppr->radiation_streaming_approximation != rsa_none)) { ppw->approx[ppw->index_ap_rsa] = (int)rsa_on; } else { ppw->approx[ppw->index_ap_rsa] = (int)rsa_off; } } return _SUCCESS_; } /** * Compute typical timescale over which the perturbation equations * vary. Some integrators (e.g. Runge-Kunta) benefit from calling this * routine at each step in order to adapt the next step. * * This is one of the few functions in the code which is passed to the generic_integrator() routine. * Since generic_integrator() should work with functions passed from various modules, the format of the arguments * is a bit special: * - fixed parameters and workspaces are passed through a generic pointer. * generic_integrator() doesn't know the content of this pointer. * - the error management is a bit special: errors are not written as usual to pth->error_message, but to a generic * error_message passed in the list of arguments. 
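 * For scalar modes, for example, the returned timescale starts from tau_h, is
 * reduced to min(tau_h, tau_k) as long as radiation perturbations are followed
 * (or ncdm species are present), and includes tau_c only when the
 * tight-coupling approximation is off.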
* * @param tau Input: conformal time * @param parameters_and_workspace Input: fixed parameters (e.g. indices), workspace, approximation used, etc. * @param timescale Output: perturbation variation timescale (given the approximation used) * @param error_message Output: error message */ int perturb_timescale( double tau, void * parameters_and_workspace, double * timescale, ErrorMsg error_message ) { /** Summary: */ /** - define local variables */ /* (a) time scale of Fourier mode, \f$ \tau_k = 1/k \f$ */ double tau_k; /* (b) time scale of expansion, \f$ \tau_h = a/a' \f$ */ double tau_h; /* (c) time scale of recombination, \f$ \tau_{\gamma} = 1/\kappa' \f$ */ double tau_c; /* various pointers allowing to extract the fields of the parameter_and_workspace input structure */ struct perturb_parameters_and_workspace * pppaw; struct background * pba; struct thermo * pth; struct perturbs * ppt; struct perturb_workspace * ppw; double * pvecback; double * pvecthermo; /** - extract the fields of the parameter_and_workspace input structure */ pppaw = parameters_and_workspace; pba = pppaw->pba; pth = pppaw->pth; ppt = pppaw->ppt; ppw = pppaw->ppw; pvecback = ppw->pvecback; pvecthermo = ppw->pvecthermo; /** - compute Fourier mode time scale = \f$ \tau_k = 1/k \f$ */ class_test(pppaw->k == 0., ppt->error_message, "stop to avoid division by zero"); tau_k = 1./pppaw->k; /** - evaluate background quantities with background_at_tau() and Hubble time scale \f$ \tau_h = a/a' \f$ */ class_call(background_at_tau(pba,tau, pba->normal_info, ppw->inter_mode, &(ppw->last_index_back), pvecback), pba->error_message, error_message); class_test(pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a] == 0., error_message, "aH=0, stop to avoid division by zero"); tau_h = 1./(pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]); /** - for scalars modes: */ if ((ppt->has_scalars == _TRUE_) && (pppaw->index_md == ppt->index_md_scalars)) { *timescale = tau_h; if ((ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) || (pba->has_ncdm == _TRUE_)) *timescale = MIN(tau_k,*timescale); if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { class_call(thermodynamics_at_z(pba, pth, 1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ ppw->inter_mode, &(ppw->last_index_thermo), pvecback, pvecthermo), pth->error_message, error_message); if (pvecthermo[pth->index_th_dkappa] != 0.) { /** - --> compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */ tau_c = 1./pvecthermo[pth->index_th_dkappa]; *timescale = MIN(tau_c,*timescale); } } } /** - for vector modes: */ if ((ppt->has_vectors == _TRUE_) && (pppaw->index_md == ppt->index_md_vectors)) { *timescale = MIN(tau_h,tau_k); if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { class_call(thermodynamics_at_z(pba, pth, 1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ ppw->inter_mode, &(ppw->last_index_thermo), pvecback, pvecthermo), pth->error_message, error_message); if (pvecthermo[pth->index_th_dkappa] != 0.) 
{ /** - --> compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */ tau_c = 1./pvecthermo[pth->index_th_dkappa]; *timescale = MIN(tau_c,*timescale); } } } /** - for tensor modes: */ if ((ppt->has_tensors == _TRUE_) && (pppaw->index_md == ppt->index_md_tensors)) { *timescale = MIN(tau_h,tau_k); if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { class_call(thermodynamics_at_z(pba, pth, 1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ ppw->inter_mode, &(ppw->last_index_thermo), pvecback, pvecthermo), pth->error_message, error_message); if (pvecthermo[pth->index_th_dkappa] != 0.) { /** - --> compute recombination time scale for photons, \f$ \tau_{\gamma} = 1/ \kappa' \f$ */ tau_c = 1./pvecthermo[pth->index_th_dkappa]; *timescale = MIN(tau_c,*timescale); } } } return _SUCCESS_; } /** * Compute metric perturbations (those not integrated over time) using Einstein equations * * @param ppr Input: pointer to precision structure * @param pba Input: pointer to background structure * @param pth Input: pointer to thermodynamics structure * @param ppt Input: pointer to the perturbation structure * @param index_md Input: index of mode under consideration (scalar/.../tensor) * @param k Input: wavenumber * @param tau Input: conformal time * @param y Input: vector of perturbations (those integrated over time) (already allocated) * @param ppw Input/Output: in output contains the updated metric perturbations * @return the error status */ int perturb_einstein( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt, int index_md, double k, double tau, double * y, struct perturb_workspace * ppw ) { /** Summary: */ /** - define local variables */ double k2,a,a2,a_prime_over_a; double s2_squared; double shear_g = 0.; /** - define wavenumber and scale factor related quantities */ k2 = k*k; a = ppw->pvecback[pba->index_bg_a]; a2 = a * a; a_prime_over_a = ppw->pvecback[pba->index_bg_H]*a; s2_squared = 1.-3.*pba->K/k2; /** - sum up perturbations from all species */ class_call(perturb_total_stress_energy(ppr,pba,pth,ppt,index_md,k,y,ppw), ppt->error_message, ppt->error_message); /** - for scalar modes: */ if (_scalars_) { /** - --> infer metric perturbations from Einstein equations */ /* newtonian gauge */ if (ppt->gauge == newtonian) { /* in principle we could get phi from the constrain equation: ppw->pvecmetric[ppw->index_mt_phi] = -1.5 * (a2/k2/k2/s2/s2) * (k2 * delta_rho + 3.*a_prime_over_a * rho_plus_p_theta); with s2_squared = sqrt(1-3K/k2) = ppw->s_l[2]*ppw->s_l[2] This was the case in class v1.3. However the integration is more stable is we treat phi as a dynamical variable y[ppw->pv->index_pt_phi], which derivative is given by the second equation below (credits to Guido Walter Pettinari). 
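     With this choice the constraint is still enforced through the psi equation just
     below, psi = phi - (9/2)(a^2/k^2)(rho+p)sigma, while phi itself is advanced in
     time with the phi' equation (the 9/2 coefficient reflects the convention in which
     the background densities absorb a factor 8 pi G / 3).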
*/ /* equation for psi */ ppw->pvecmetric[ppw->index_mt_psi] = y[ppw->pv->index_pt_phi] - 4.5 * (a2/k2) * ppw->rho_plus_p_shear; /* equation for phi' */ ppw->pvecmetric[ppw->index_mt_phi_prime] = -a_prime_over_a * ppw->pvecmetric[ppw->index_mt_psi] + 1.5 * (a2/k2) * ppw->rho_plus_p_theta; /* eventually, infer radiation streaming approximation for gamma and ur (this is exactly the right place to do it because the result depends on h_prime) */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) { class_call(perturb_rsa_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw), ppt->error_message, ppt->error_message); } } /* synchronous gauge */ if (ppt->gauge == synchronous) { /* first equation involving total density fluctuation */ ppw->pvecmetric[ppw->index_mt_h_prime] = ( k2 * s2_squared * y[ppw->pv->index_pt_eta] + 1.5 * a2 * ppw->delta_rho)/(0.5*a_prime_over_a); /* h' */ /* eventually, infer radiation streaming approximation for gamma and ur (this is exactly the right place to do it because the result depends on h_prime) */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) { class_call(perturb_rsa_delta_and_theta(ppr,pba,pth,ppt,k,y,a_prime_over_a,ppw->pvecthermo,ppw), ppt->error_message, ppt->error_message); /* update total theta given rsa approximation results */ ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*ppw->rsa_theta_g; if (pba->has_ur == _TRUE_) { ppw->rho_plus_p_theta += 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*ppw->rsa_theta_ur; } } /* second equation involving total velocity */ ppw->pvecmetric[ppw->index_mt_eta_prime] = (1.5 * a2 * ppw->rho_plus_p_theta + 0.5 * pba->K * ppw->pvecmetric[ppw->index_mt_h_prime])/k2/s2_squared; /* eta' */ /* third equation involving total pressure */ ppw->pvecmetric[ppw->index_mt_h_prime_prime] = - 2. * a_prime_over_a * ppw->pvecmetric[ppw->index_mt_h_prime] + 2. * k2 * s2_squared * y[ppw->pv->index_pt_eta] - 9. * a2 * ppw->delta_p; /* alpha = (h'+6eta')/2k^2 */ ppw->pvecmetric[ppw->index_mt_alpha] = (ppw->pvecmetric[ppw->index_mt_h_prime] + 6.*ppw->pvecmetric[ppw->index_mt_eta_prime])/2./k2; /* eventually, infer first-order tight-coupling approximation for photon shear, then correct the total shear */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_on) { shear_g = 16./45./ppw->pvecthermo[pth->index_th_dkappa]*(y[ppw->pv->index_pt_theta_g]+k2*ppw->pvecmetric[ppw->index_mt_alpha]); ppw->rho_plus_p_shear += 4./3.*ppw->pvecback[pba->index_bg_rho_g]*shear_g; } /* fourth equation involving total shear */ ppw->pvecmetric[ppw->index_mt_alpha_prime] = //TBC - 2. * a_prime_over_a * ppw->pvecmetric[ppw->index_mt_alpha] + y[ppw->pv->index_pt_eta] - 4.5 * (a2/k2) * ppw->rho_plus_p_shear; } /* transform (delta_m, theta_m) of the current gauge into gauge-independent variables (you could comment this out if you really want gauge-dependent results) */ if (ppt->has_source_delta_m == _TRUE_) { ppw->delta_m += 3. *ppw->pvecback[pba->index_bg_a]*ppw->pvecback[pba->index_bg_H] * ppw->theta_m/k2; // note: until 2.4.3 there was a typo, the factor was (-2 H'/H) instead // of (3 aH). There is the same typo in the CLASSgal paper // 1307.1459v1,omega,v3. It came from a confusion between (1+w_total) // and (1+w_matter)=1 [the latter is the relevant one here]. // // note2: at this point this gauge-invariant variable is only // valid if all matter components are pressureless and // stable. This relation will be generalized soon to the case // of decaying dark matter. 
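      // in other words, the stored quantity is D = delta_m + 3 (a'/a) theta_m / k^2,
      // the gauge-invariant density contrast of arXiv:1307.1459 used for the
      // delta_m source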
} if (ppt->has_source_theta_m == _TRUE_) { if (ppt->gauge == synchronous) { ppw->theta_m += ppw->pvecmetric[ppw->index_mt_alpha]*k2; } } } /** - for vector modes */ if (_vectors_) { if (ppt->gauge == newtonian) { ppw->pvecmetric[ppw->index_mt_V_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_V] - 3.*ppw->vector_source_pi/k; } if (ppt->gauge == synchronous) { // assuming vector_source_pi = p_class a^2 pi_T^{(1)} and vector_source_v = (rho_class+p_class)a^2 v^{(1)} // from Hu and White: ppw->pvecmetric[ppw->index_mt_hv_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_hv_prime] - 3.*ppw->vector_source_pi/k2; // what we suspect: //ppw->pvecmetric[ppw->index_mt_hv_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_hv_prime] - 3.*ppw->vector_source_pi; // if we use the other equation: //ppw->pvecmetric[ppw->index_mt_hv_prime] = -2./k/ (1.-2.*pba->K/k2) * 3. * ppw->vector_source_v; } } /** - for tensor modes */ if (_tensors_) { /* single einstein equation for tensor perturbations */ ppw->pvecmetric[ppw->index_mt_gw_prime_prime] = -2.*a_prime_over_a*y[ppw->pv->index_pt_gwdot]-(k2+2.*pba->K)*y[ppw->pv->index_pt_gw]+ppw->gw_source; } return _SUCCESS_; } int perturb_total_stress_energy( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt, int index_md, double k, double * y, struct perturb_workspace * ppw ) { /** Summary: */ /** - define local variables */ double a,a2; double delta_g=0.; double theta_g=0.; double shear_g=0.; double delta_ur=0.; double theta_ur=0.; double shear_ur=0.; double rho_delta_ncdm=0.; double rho_plus_p_theta_ncdm=0.; double rho_plus_p_shear_ncdm=0.; double delta_p_ncdm=0.; double factor; double rho_plus_p_ncdm; int index_q,n_ncdm,idx; double epsilon,q,q2,cg2_ncdm,w_ncdm,rho_ncdm_bg,p_ncdm_bg,pseudo_p_ncdm; double rho_m,delta_rho_m,rho_plus_p_m,rho_plus_p_theta_m; double w; double gwncdm; double rho_relativistic; double rho_dr_over_f; double delta_rho_scf=0., delta_p_scf=0., rho_plus_p_theta_scf=0., delta0_scf=0., delta1_scf=0., psi; double theta_phi_scf, y_phi_scf, rho_scf; /** - wavenumber and scale factor related quantities */ a = ppw->pvecback[pba->index_bg_a]; a2 = a * a; /** - for scalar modes */ if (_scalars_) { /** - --> (a) deal with approximation schemes */ /** - ---> (a.1.) photons */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /** - ----> (a.1.1.) no approximation */ delta_g = y[ppw->pv->index_pt_delta_g]; theta_g = y[ppw->pv->index_pt_theta_g]; shear_g = y[ppw->pv->index_pt_shear_g]; } else { /** - ----> (a.1.2.) radiation streaming approximation */ delta_g = 0.; /* actual free streaming approximation imposed after evaluation of einstein equations */ theta_g = 0.; /* actual free streaming approximation imposed after evaluation of einstein equations */ shear_g = 0.; /* shear always neglected in radiation streaming approximation */ } } else { /** - ----> (a.1.3.) tight coupling approximation */ delta_g = y[ppw->pv->index_pt_delta_g]; theta_g = y[ppw->pv->index_pt_theta_g]; /* first-order tight-coupling approximation for photon shear */ if (ppt->gauge == newtonian) { shear_g = 16./45./ppw->pvecthermo[pth->index_th_dkappa]*y[ppw->pv->index_pt_theta_g]; } else { shear_g = 0.; /* in the synchronous gauge, the expression of shear_g (at first-order in a tight-coupling expansion) is a function of h' and eta'; but h' and eta' are calculated in perturb_einstein() as a function of delta_g and theta_g. 
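      (The first-order expression used there is
      shear_g = 16/(45 kappa') * (theta_g + k^2 alpha), with alpha = (h' + 6 eta')/(2 k^2).)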
Hence, we set shear_g temporarily to zero, and set it to the right first-order value in perturb_einstein(), just before using the Einstein equation for the shear. */ } } /** - ---> (a.2.) ur */ if (pba->has_ur == _TRUE_) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { delta_ur = y[ppw->pv->index_pt_delta_ur]; theta_ur = y[ppw->pv->index_pt_theta_ur]; shear_ur = y[ppw->pv->index_pt_shear_ur]; } else { delta_ur = 0.; /* actual free streaming approximation imposed after evaluation of 1st einstein equation */ theta_ur = 0.; /* actual free streaming approximation imposed after evaluation of 1st einstein equation */ shear_ur = 0.; /* shear always neglected in free streaming approximation */ } } /** - --> (b) compute the total density, velocity and shear perturbations */ /* photon and baryon contribution */ ppw->delta_rho = ppw->pvecback[pba->index_bg_rho_g]*delta_g + ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b]; ppw->rho_plus_p_theta = 4./3.*ppw->pvecback[pba->index_bg_rho_g]*theta_g + ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_theta_b]; ppw->rho_plus_p_shear = 4./3.*ppw->pvecback[pba->index_bg_rho_g]*shear_g; ppw->delta_p = 1./3.*ppw->pvecback[pba->index_bg_rho_g]*delta_g + ppw->pvecthermo[pth->index_th_cb2]*ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b]; /* cdm contribution */ if (pba->has_cdm == _TRUE_) { ppw->delta_rho = ppw->delta_rho + ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_delta_cdm]; if (ppt->gauge == newtonian) ppw->rho_plus_p_theta = ppw->rho_plus_p_theta + ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_theta_cdm]; } /* dcdm contribution */ if (pba->has_dcdm == _TRUE_) { ppw->delta_rho += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_delta_dcdm]; ppw->rho_plus_p_theta += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_theta_dcdm]; } /* fluid contribution */ if (pba->has_fld == _TRUE_) { w = pba->w0_fld + pba->wa_fld * (1. - a / pba->a_today); ppw->delta_rho += ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_delta_fld]; ppw->rho_plus_p_theta += (1.+w)*ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_theta_fld]; ppw->delta_p = ppw->delta_p + pba->cs2_fld * ppw->pvecback[pba->index_bg_rho_fld]*y[ppw->pv->index_pt_delta_fld]; } /* ultra-relativistic decay radiation */ if (pba->has_dr == _TRUE_) { /* We have delta_rho_dr = rho_dr * F0_dr / f, where F follows the convention in astro-ph/9907388 and f is defined as f = rho_dr*a^4/rho_crit_today. In CLASS density units rho_crit_today = H0^2. 
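       It follows that rho_dr/f = H0^2/a^4 = (H0/a^2)^2, which is the prefactor
       rho_dr_over_f computed below and multiplying the moments F0_dr, F1_dr, F2_dr.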
*/ rho_dr_over_f = pow(pba->H0/a2,2); ppw->delta_rho += rho_dr_over_f*y[ppw->pv->index_pt_F0_dr]; ppw->rho_plus_p_theta += 4./3.*3./4*k*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr+1]; ppw->rho_plus_p_shear += 2./3.*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr+2]; ppw->delta_p += 1./3.*rho_dr_over_f*y[ppw->pv->index_pt_F0_dr]; } /* ultra-relativistic neutrino/relics contribution */ if (pba->has_ur == _TRUE_) { ppw->delta_rho = ppw->delta_rho + ppw->pvecback[pba->index_bg_rho_ur]*delta_ur; ppw->rho_plus_p_theta = ppw->rho_plus_p_theta + 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*theta_ur; ppw->rho_plus_p_shear = ppw->rho_plus_p_shear + 4./3.*ppw->pvecback[pba->index_bg_rho_ur]*shear_ur; ppw->delta_p += 1./3.*ppw->pvecback[pba->index_bg_rho_ur]*delta_ur; } /* non-cold dark matter contribution */ if (pba->has_ncdm == _TRUE_) { idx = ppw->pv->index_pt_psi0_ncdm1; if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on){ // The perturbations are evolved integrated: for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ rho_ncdm_bg = ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; p_ncdm_bg = ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]; pseudo_p_ncdm = ppw->pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm]; rho_plus_p_ncdm = rho_ncdm_bg + p_ncdm_bg; w_ncdm = p_ncdm_bg/rho_ncdm_bg; cg2_ncdm = w_ncdm*(1.0-1.0/(3.0+3.0*w_ncdm)*(3.0*w_ncdm-2.0+pseudo_p_ncdm/p_ncdm_bg)); if ((ppt->has_source_delta_ncdm == _TRUE_) || (ppt->has_source_theta_ncdm == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) { ppw->delta_ncdm[n_ncdm] = y[idx]; ppw->theta_ncdm[n_ncdm] = y[idx+1]; ppw->shear_ncdm[n_ncdm] = y[idx+2]; } ppw->delta_rho += rho_ncdm_bg*y[idx]; ppw->rho_plus_p_theta += rho_plus_p_ncdm*y[idx+1]; ppw->rho_plus_p_shear += rho_plus_p_ncdm*y[idx+2]; ppw->delta_p += cg2_ncdm*rho_ncdm_bg*y[idx]; idx += ppw->pv->l_max_ncdm[n_ncdm]+1; } } else{ // We must integrate to find perturbations: for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ rho_delta_ncdm = 0.0; rho_plus_p_theta_ncdm = 0.0; rho_plus_p_shear_ncdm = 0.0; delta_p_ncdm = 0.0; factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4); for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) { q = pba->q_ncdm[n_ncdm][index_q]; q2 = q*q; epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2); rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx]; rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1]; rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2]; delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx]; //Jump to next momentum bin: idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1); } rho_delta_ncdm *= factor; rho_plus_p_theta_ncdm *= k*factor; rho_plus_p_shear_ncdm *= 2.0/3.0*factor; delta_p_ncdm *= factor/3.; if ((ppt->has_source_delta_ncdm == _TRUE_) || (ppt->has_source_theta_ncdm == _TRUE_) || (ppt->has_source_delta_m == _TRUE_)) { ppw->delta_ncdm[n_ncdm] = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; ppw->theta_ncdm[n_ncdm] = rho_plus_p_theta_ncdm/ (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]); ppw->shear_ncdm[n_ncdm] = rho_plus_p_shear_ncdm/ (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]); } ppw->delta_rho += rho_delta_ncdm; ppw->rho_plus_p_theta += rho_plus_p_theta_ncdm; ppw->rho_plus_p_shear += rho_plus_p_shear_ncdm; ppw->delta_p += delta_p_ncdm; } } } /* scalar field contribution. In Newtonian gauge, delta_scf depends on the metric perturbation psi which is inferred from rho_plus_p_shear. 
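       (psi is reconstructed below as psi = phi - (9/2)(a^2/k^2) rho_plus_p_shear,
       the same relation as in perturb_einstein.)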
So the contribution from the scalar field must be below all species with non-zero shear. */ if (pba->has_scf == _TRUE_) { theta_phi_scf = ppw->pvecback[pba->index_bg_theta_phi_scf]; y_phi_scf = ppw->pvecback[pba->index_bg_y_phi_scf]; rho_scf = ppw->pvecback[pba->index_bg_rho_scf]; delta0_scf = y[ppw->pv->index_pt_delta0_scf]; delta1_scf = y[ppw->pv->index_pt_delta1_scf]; if (ppt->gauge == synchronous){ /* Version delta0 - delta1 */ delta_rho_scf = rho_scf*delta0_scf; delta_p_scf = rho_scf*(delta1_scf*sin_scf(pba,theta_phi_scf)-delta0_scf*cos_scf(pba,theta_phi_scf)); rho_plus_p_theta_scf = k*k*rho_scf*(-delta0_scf*sin_scf(pba,theta_phi_scf)+delta1_scf*(1.-cos_scf(pba,theta_phi_scf)))/(a*ppw->pvecback[pba->index_bg_H]*y_phi_scf); } else{ /** TODO Write all below for the newtonian gauge **/ /* equation for psi */ psi = y[ppw->pv->index_pt_phi] - 4.5 * (a2/k/k) * ppw->rho_plus_p_shear; delta_rho_scf = 0.*psi; /** 1./3.* (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf] + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf] - 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*psi);*/ delta_p_scf = 0.*psi; /** 1./3.* (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf] - ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf] - 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*psi);*/ } ppw->delta_rho += delta_rho_scf; ppw->rho_plus_p_theta += rho_plus_p_theta_scf; /*1./3.* k*k/a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_scf];*/ ppw->delta_p += delta_p_scf; } /* store delta_m in the current gauge. In perturb_einstein, this will be transformed later on into the gauge-independent variable D = delta_m - 2H'/H \theta_m/k^2 . */ if (ppt->has_source_delta_m == _TRUE_) { /* include baryons and cold dark matter */ delta_rho_m = ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_delta_b]; rho_m = ppw->pvecback[pba->index_bg_rho_b]; if (pba->has_cdm == _TRUE_) { delta_rho_m += ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_delta_cdm]; rho_m += ppw->pvecback[pba->index_bg_rho_cdm]; } /* include decaying cold dark matter */ if (pba->has_dcdm == _TRUE_) { delta_rho_m += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_delta_dcdm]; rho_m += ppw->pvecback[pba->index_bg_rho_dcdm]; } /* include any other species non-relativistic today (like ncdm species) */ if (pba->has_ncdm == _TRUE_) { for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ delta_rho_m += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]*ppw->delta_ncdm[n_ncdm]; rho_m += ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; } } /* Include the scalar field into the matter density constrast */ /*if (pba->has_scf == _TRUE_) { delta_rho_m += delta_rho_scf; rho_m += ppw->pvecback[pba->index_bg_rho_scf]; }*/ /* infer delta_m */ ppw->delta_m = delta_rho_m/rho_m; } /* store theta_m in the current gauge. In perturb_einstein, this will be transformed later on into the gauge-independent variable Theta . Note that computing theta_m is necessary also if we want the delta_m source only, because the gauge-invariant delta_m involves theta_m in the current gauge. 
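       In the synchronous gauge this amounts to shifting theta_m by k^2 alpha in
       perturb_einstein(), with alpha = (h' + 6 eta')/(2 k^2).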
*/ if ((ppt->has_source_delta_m == _TRUE_) || (ppt->has_source_theta_m == _TRUE_)) { /* include baryons and cold dark matter */ rho_plus_p_theta_m = ppw->pvecback[pba->index_bg_rho_b]*y[ppw->pv->index_pt_theta_b]; rho_plus_p_m = ppw->pvecback[pba->index_bg_rho_b]; if (pba->has_cdm == _TRUE_) { if (ppt->gauge == newtonian) rho_plus_p_theta_m += ppw->pvecback[pba->index_bg_rho_cdm]*y[ppw->pv->index_pt_theta_cdm]; rho_plus_p_m += ppw->pvecback[pba->index_bg_rho_cdm]; } if (pba->has_dcdm == _TRUE_) { rho_plus_p_theta_m += ppw->pvecback[pba->index_bg_rho_dcdm]*y[ppw->pv->index_pt_theta_dcdm]; rho_plus_p_m += ppw->pvecback[pba->index_bg_rho_dcdm]; } /* include any other species non-relativistic today (like ncdm species) */ if (pba->has_ncdm == _TRUE_) { for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ rho_plus_p_theta_m += (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm])*ppw->theta_ncdm[n_ncdm]; rho_plus_p_m += (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]); } } /* Include the scalar field into the matter velocity divergence */ /*if (pba->has_scf == _TRUE_) { rho_plus_p_theta_m += rho_plus_p_theta_scf; rho_plus_p_m += ppw->pvecback[pba->index_bg_rho_scf]+ppw->pvecback[pba->index_bg_p_scf]; }*/ /* infer theta_m */ ppw->theta_m = rho_plus_p_theta_m/rho_plus_p_m; } } /** - for vector modes */ if (_vectors_) { ppw->vector_source_pi = 0.; ppw->vector_source_v = 0.; /** - --> photon contribution to vector sources: */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */ ppw->vector_source_v += 4./3.*a2*ppw->pvecback[pba->index_bg_rho_g] * (-1./4.*_SQRT2_) * (y[ppw->pv->index_pt_delta_g]+2.*y[ppw->pv->index_pt_delta_g]+y[ppw->pv->index_pt_shear_g]); ppw->vector_source_pi += 1./3.*a2*ppw->pvecback[pba->index_bg_rho_g] * (6.*_SQRT2_/5./sqrt(1.-2.*pba->K/k/k)) * (4./3./k*y[ppw->pv->index_pt_theta_g]+y[ppw->pv->index_pt_l3_g]); } } /** - --> baryons */ } /** - for tensor modes */ if (_tensors_) { ppw->gw_source = 0.0; /** - --> photon contribution to gravitational wave source: */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /* if radiation streaming approximation is off */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* if tight-coupling approximation is off */ ppw->gw_source += (-_SQRT6_*4*a2*ppw->pvecback[pba->index_bg_rho_g]* (1./15.*y[ppw->pv->index_pt_delta_g]+ 4./21.*y[ppw->pv->index_pt_shear_g]+ 1./35.*y[ppw->pv->index_pt_l3_g+1])); } } /** - --> ur contribution to gravitational wave source: */ if (ppt->evolve_tensor_ur == _TRUE_){ rho_relativistic = 0.; if (ppt->tensor_method == tm_exact) rho_relativistic += ppw->pvecback[pba->index_bg_rho_ur]; if (ppt->tensor_method == tm_massless_approximation) { if (pba->has_ur == _TRUE_) rho_relativistic += ppw->pvecback[pba->index_bg_rho_ur]; if (pba->has_ncdm == _TRUE_) { for(n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++) { /* (3 p_ncdm1) is the "relativistic" contribution to rho_ncdm1 */ rho_relativistic += 3.*ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]; } } } ppw->gw_source += (-_SQRT6_*4*a2*rho_relativistic* (1./15.*y[ppw->pv->index_pt_delta_ur]+ 4./21.*y[ppw->pv->index_pt_shear_ur]+ 1./35.*y[ppw->pv->index_pt_l3_ur+1])); } /** - --> ncdm contribution to gravitational wave source: */ if (ppt->evolve_tensor_ncdm == _TRUE_){ idx = ppw->pv->index_pt_psi0_ncdm1; // We must integrate to find perturbations: 
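      /* the q-integral below builds the ncdm contribution to the tensor anisotropic
         stress (the gw source) from the l=0,2,4 entries of the distribution function
         (y[idx], y[idx+2], y[idx+4]) in each momentum bin, using the quadrature
         weights w_ncdm and the same overall factor factor_ncdm * (a_today/a)^4 as in
         the scalar ncdm integrals */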
for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ gwncdm = 0.; factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4); for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) { q = pba->q_ncdm[n_ncdm][index_q]; q2 = q*q; epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2); gwncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*(1./15.*y[idx]+2./21.*y[idx+2]+1./35.*y[idx+4]); //Jump to next momentum bin: idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1); } gwncdm *= -_SQRT6_*4*a2*factor; ppw->gw_source += gwncdm; } } } return _SUCCESS_; } /** * Compute the source functions (three terms for temperature, one for * E or B modes, etc.) * * This is one of the few functions in the code which is passed to * the generic_integrator() routine. Since generic_integrator() * should work with functions passed from various modules, the format * of the arguments is a bit special: * * - fixed parameters and workspaces are passed through a generic * pointer. generic_integrator() doesn't know the content of this * pointer. * * - the error management is a bit special: errors are not written as * usual to pth->error_message, but to a generic error_message passed * in the list of arguments. * * @param tau Input: conformal time * @param y Input: vector of perturbations * @param dy Input: vector of time derivative of perturbations * @param index_tau Input: index in the array tau_sampling * @param parameters_and_workspace Input/Output: in input, all parameters needed by perturb_derivs, in output, source terms * @param error_message Output: error message * @return the error status */ int perturb_sources( double tau, double * y, double * dy, int index_tau, void * parameters_and_workspace, ErrorMsg error_message ) { /** Summary: */ /** - define local variables */ double P; int index_type; struct perturb_parameters_and_workspace * pppaw; struct precision * ppr; struct background * pba; struct thermo * pth; struct perturbs * ppt; int index_md; int index_ic; int index_k; double k; double z; struct perturb_workspace * ppw; double * pvecback; double * pvecthermo; double * pvecmetric; double delta_g, delta_scf, rho_plus_p_theta_scf; double a_prime_over_a=0.; /* (a'/a) */ double a_prime_over_a_prime=0.; /* (a'/a)' */ int switch_isw = 1; double a_rel, a2_rel, f_dr; /** - rename structure fields (just to avoid heavy notations) */ pppaw = parameters_and_workspace; ppr = pppaw->ppr; pba = pppaw->pba; pth = pppaw->pth; ppt = pppaw->ppt; index_md = pppaw->index_md; index_ic = pppaw->index_ic; index_k = pppaw->index_k; k = pppaw->k; ppw = pppaw->ppw; pvecback = ppw->pvecback; pvecthermo = ppw->pvecthermo; pvecmetric = ppw->pvecmetric; /** - get background/thermo quantities in this point */ class_call(background_at_tau(pba, tau, pba->normal_info, pba->inter_closeby, &(ppw->last_index_back), pvecback), pba->error_message, error_message); z = pba->a_today/pvecback[pba->index_bg_a]-1.; class_call(thermodynamics_at_z(pba, pth, z, /* redshift z=1/a-1 */ pth->inter_closeby, &(ppw->last_index_thermo), pvecback, pvecthermo), pth->error_message, error_message); a_rel = ppw->pvecback[pba->index_bg_a]/pba->a_today; a2_rel = a_rel * a_rel; /* derived background quantities, useful only in synchronous gauge */ if (ppt->gauge == synchronous) { a_prime_over_a = pvecback[pba->index_bg_a] * pvecback[pba->index_bg_H]; /* (a'/a)=aH */ a_prime_over_a_prime = pvecback[pba->index_bg_H_prime] * pvecback[pba->index_bg_a] + pow(pvecback[pba->index_bg_H] * pvecback[pba->index_bg_a],2); /* (a'/a)' = aH'+(aH)^2 */ } /** - for scalars */ if (_scalars_) { 
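      /* note on the polarisation source P computed below: under tight coupling the
         polarisation multipoles satisfy pol0_g ~ (5/2) shear_g and pol2_g ~ (1/2) shear_g,
         so (pol0_g + pol2_g + 2 shear_g)/8 reduces to the (2.5+0.5+2) shear_g/8
         = 5 shear_g/8 expression used in the tca branch */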
/** - --> compute metric perturbations */ class_call(perturb_einstein(ppr, pba, pth, ppt, index_md, k, tau, y, ppw), ppt->error_message, error_message); /** - --> compute quantities depending on approximation schemes */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) { delta_g = ppw->rsa_delta_g; P = 0.; } else { delta_g = y[ppw->pv->index_pt_delta_g]; if (ppw->approx[ppw->index_ap_tca] == (int)tca_on) P = 5.* ppw->s_l[2] * ppw->tca_shear_g/8.; /* (2.5+0.5+2)shear_g/8 */ else P = (y[ppw->pv->index_pt_pol0_g] + y[ppw->pv->index_pt_pol2_g] + 2.* ppw->s_l[2] *y[ppw->pv->index_pt_shear_g])/8.; } /** - --> for each type, compute source terms */ /* scalar temperature */ if (ppt->has_source_t == _TRUE_) { /* check whether integrated Sachs-Wolf term should be included */ if ((ppt->switch_eisw == 0) && (z >= ppt->eisw_lisw_split_z)){ switch_isw = 0; } if ((ppt->switch_lisw == 0) && (z < ppt->eisw_lisw_split_z)) { switch_isw=0; } /* newtonian gauge: simplest form, not efficient numerically */ /* if (ppt->gauge == newtonian) { _set_source_(ppt->index_tp_t0) = pvecthermo[pth->index_th_exp_m_kappa] * pvecmetric[ppw->index_mt_phi_prime] + pvecthermo[pth->index_th_g] * delta_g / 4.; _set_source_(ppt->index_tp_t1) = pvecthermo[pth->index_th_exp_m_kappa] * k* pvecmetric[ppw->index_mt_psi] + pvecthermo[pth->index_th_g] * y[ppw->pv->index_pt_theta_b]/k; _set_source_(ppt->index_tp_t2) = pvecthermo[pth->index_th_g] * P; } */ /* newtonian gauge: slightly more complicated form, but more efficient numerically */ if (ppt->gauge == newtonian) { _set_source_(ppt->index_tp_t0) = ppt->switch_sw * pvecthermo[pth->index_th_g] * (delta_g / 4. + pvecmetric[ppw->index_mt_psi]) + switch_isw * (pvecthermo[pth->index_th_g] * (y[ppw->pv->index_pt_phi]-pvecmetric[ppw->index_mt_psi]) + pvecthermo[pth->index_th_exp_m_kappa] * 2. * pvecmetric[ppw->index_mt_phi_prime]) + ppt->switch_dop /k/k * (pvecthermo[pth->index_th_g] * dy[ppw->pv->index_pt_theta_b] + pvecthermo[pth->index_th_dg] * y[ppw->pv->index_pt_theta_b]); _set_source_(ppt->index_tp_t1) = switch_isw * pvecthermo[pth->index_th_exp_m_kappa] * k* (pvecmetric[ppw->index_mt_psi]-y[ppw->pv->index_pt_phi]); _set_source_(ppt->index_tp_t2) = ppt->switch_pol * pvecthermo[pth->index_th_g] * P; } /* synchronous gauge: simplest form, not efficient numerically */ /* if (ppt->gauge == synchronous) { _set_source_(ppt->index_tp_t0) = - pvecthermo[pth->index_th_exp_m_kappa] * pvecmetric[ppw->index_mt_h_prime] / 6. + pvecthermo[pth->index_th_g] / 4. * delta_g; _set_source_(ppt->index_tp_t1) = pvecthermo[pth->index_th_g] * y[ppw->pv->index_pt_theta_b] / k; _set_source_(ppt->index_tp_t2) = pvecthermo[pth->index_th_exp_m_kappa] * k*k* 2./3. * ppw->s_l[2] * pvecmetric[ppw->index_mt_alpha] + pvecthermo[pth->index_th_g] * P; } */ /* synchronous gauge: slightly more complicated form, but more efficient numerically */ if (ppt->gauge == synchronous) { _set_source_(ppt->index_tp_t0) = ppt->switch_sw * pvecthermo[pth->index_th_g] * (delta_g/4. + pvecmetric[ppw->index_mt_alpha_prime]) + switch_isw * (pvecthermo[pth->index_th_g] * (y[ppw->pv->index_pt_eta] - pvecmetric[ppw->index_mt_alpha_prime] - 2 * a_prime_over_a * pvecmetric[ppw->index_mt_alpha]) + pvecthermo[pth->index_th_exp_m_kappa] * 2. 
* (pvecmetric[ppw->index_mt_eta_prime] - a_prime_over_a_prime * pvecmetric[ppw->index_mt_alpha] - a_prime_over_a * pvecmetric[ppw->index_mt_alpha_prime])) + ppt->switch_dop * (pvecthermo[pth->index_th_g] * (dy[ppw->pv->index_pt_theta_b]/k/k + pvecmetric[ppw->index_mt_alpha_prime]) +pvecthermo[pth->index_th_dg] * (y[ppw->pv->index_pt_theta_b]/k/k + pvecmetric[ppw->index_mt_alpha])); _set_source_(ppt->index_tp_t1) = switch_isw * pvecthermo[pth->index_th_exp_m_kappa] * k * (pvecmetric[ppw->index_mt_alpha_prime] + 2. * a_prime_over_a * pvecmetric[ppw->index_mt_alpha] - y[ppw->pv->index_pt_eta]); _set_source_(ppt->index_tp_t2) = ppt->switch_pol * pvecthermo[pth->index_th_g] * P; } } /* scalar polarization */ if (ppt->has_source_p == _TRUE_) { /* all gauges. Note that the correct formula for the E source should have a minus sign, as shown in Hu & White. We put a plus sign to comply with the 'historical convention' established in CMBFAST and CAMB. */ _set_source_(ppt->index_tp_p) = sqrt(6.) * pvecthermo[pth->index_th_g] * P; } /* now, non-CMB sources */ /* Bardeen potential -PHI_H = phi in Newtonian gauge */ if (ppt->has_source_phi == _TRUE_) { if (ppt->gauge == newtonian) _set_source_(ppt->index_tp_phi) = y[ppw->pv->index_pt_phi]; if (ppt->gauge == synchronous) _set_source_(ppt->index_tp_phi) = y[ppw->pv->index_pt_eta] - a_prime_over_a * pvecmetric[ppw->index_mt_alpha]; } /* its derivative phi' */ if (ppt->has_source_phi_prime == _TRUE_) { if (ppt->gauge == newtonian) _set_source_(ppt->index_tp_phi_prime) = dy[ppw->pv->index_pt_phi]; if (ppt->gauge == synchronous) _set_source_(ppt->index_tp_phi_prime) = dy[ppw->pv->index_pt_eta] - a_prime_over_a_prime * pvecmetric[ppw->index_mt_alpha] - a_prime_over_a * pvecmetric[ppw->index_mt_alpha_prime]; } /* diff of Bardeen potentials PHI_A-PHI_H = psi + phi in newtonian gauge */ if (ppt->has_source_phi_plus_psi == _TRUE_) { if (ppt->gauge == newtonian) _set_source_(ppt->index_tp_phi_plus_psi) = y[ppw->pv->index_pt_phi] + pvecmetric[ppw->index_mt_psi]; if (ppt->gauge == synchronous) _set_source_(ppt->index_tp_phi_plus_psi) = y[ppw->pv->index_pt_eta] + pvecmetric[ppw->index_mt_alpha_prime]; } /* Bardeen potential PHI_A = psi in newtonian gauge */ if (ppt->has_source_psi == _TRUE_) { if (ppt->gauge == newtonian) _set_source_(ppt->index_tp_psi) = pvecmetric[ppw->index_mt_psi]; if (ppt->gauge == synchronous) _set_source_(ppt->index_tp_psi) = a_prime_over_a * pvecmetric[ppw->index_mt_alpha] + pvecmetric[ppw->index_mt_alpha_prime]; } /* total matter over density (gauge-invariant, defined as in arXiv:1307.1459) */ if (ppt->has_source_delta_m == _TRUE_) { _set_source_(ppt->index_tp_delta_m) = ppw->delta_m; } /* delta_g */ if (ppt->has_source_delta_g == _TRUE_) { _set_source_(ppt->index_tp_delta_g) = delta_g; } /* delta_baryon */ if (ppt->has_source_delta_b == _TRUE_) { _set_source_(ppt->index_tp_delta_b) = y[ppw->pv->index_pt_delta_b]; } /* delta_cdm */ if (ppt->has_source_delta_cdm == _TRUE_) { _set_source_(ppt->index_tp_delta_cdm) = y[ppw->pv->index_pt_delta_cdm]; } /* delta_dcdm */ if (ppt->has_source_delta_dcdm == _TRUE_) { _set_source_(ppt->index_tp_delta_dcdm) = y[ppw->pv->index_pt_delta_dcdm]; } /* delta_fld */ if (ppt->has_source_delta_fld == _TRUE_) { _set_source_(ppt->index_tp_delta_fld) = y[ppw->pv->index_pt_delta_fld]; } /* delta_scf */ if (ppt->has_source_delta_scf == _TRUE_) { if (ppt->gauge == synchronous){ delta_scf = y[ppw->pv->index_pt_delta0_scf]; } else{ delta_scf = y[ppw->pv->index_pt_delta0_scf]; } _set_source_(ppt->index_tp_delta_scf) = 
delta_scf; } /* delta_dr */ if (ppt->has_source_delta_dr == _TRUE_) { f_dr = pow(a2_rel/pba->H0,2)*pvecback[pba->index_bg_rho_dr]; _set_source_(ppt->index_tp_delta_dr) = y[ppw->pv->index_pt_F0_dr]/f_dr; } /* delta_ur */ if (ppt->has_source_delta_ur == _TRUE_) { if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) _set_source_(ppt->index_tp_delta_ur) = y[ppw->pv->index_pt_delta_ur]; else _set_source_(ppt->index_tp_delta_ur) = ppw->rsa_delta_ur; } /* delta_ncdm1 */ if (ppt->has_source_delta_ncdm == _TRUE_) { for (index_type = ppt->index_tp_delta_ncdm1; index_type < ppt->index_tp_delta_ncdm1+pba->N_ncdm; index_type++) { _set_source_(index_type) = ppw->delta_ncdm[index_type - ppt->index_tp_delta_ncdm1]; } } /* total velocity (gauge-invariant, defined as in arXiv:1307.1459) */ if (ppt->has_source_theta_m == _TRUE_) { _set_source_(ppt->index_tp_theta_m) = ppw->theta_m; } /* theta_g */ if (ppt->has_source_theta_g == _TRUE_) { if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) _set_source_(ppt->index_tp_theta_g) = y[ppw->pv->index_pt_theta_g]; else _set_source_(ppt->index_tp_theta_g) = ppw->rsa_theta_g; } /* theta_baryon */ if (ppt->has_source_theta_b == _TRUE_) { _set_source_(ppt->index_tp_theta_b) = y[ppw->pv->index_pt_theta_b]; } /* theta_cdm */ if (ppt->has_source_theta_cdm == _TRUE_) { _set_source_(ppt->index_tp_theta_cdm) = y[ppw->pv->index_pt_theta_cdm]; } /* theta_dcdm */ if (ppt->has_source_theta_dcdm == _TRUE_) { _set_source_(ppt->index_tp_theta_dcdm) = y[ppw->pv->index_pt_theta_dcdm]; } /* theta_fld */ if (ppt->has_source_theta_fld == _TRUE_) { _set_source_(ppt->index_tp_theta_fld) = y[ppw->pv->index_pt_theta_fld]; } /* theta_scf */ if (ppt->has_source_theta_scf == _TRUE_) { /** New expression in terms of the delta0 - delta1 variables */ rho_plus_p_theta_scf = k*k*ppw->pvecback[pba->index_bg_rho_scf]*(-y[ppw->pv->index_pt_delta0_scf]*sin_scf(pba,ppw->pvecback[pba->index_bg_theta_phi_scf]) +y[ppw->pv->index_pt_delta1_scf]*(1.-cos_scf(pba,ppw->pvecback[pba->index_bg_theta_phi_scf]))) /(a_rel*ppw->pvecback[pba->index_bg_H]*ppw->pvecback[pba->index_bg_y_phi_scf]); _set_source_(ppt->index_tp_theta_scf) = rho_plus_p_theta_scf/ (pvecback[pba->index_bg_rho_scf]+pvecback[pba->index_bg_p_scf]); } /* theta_dr */ if (ppt->has_source_theta_dr == _TRUE_) { f_dr = pow(a2_rel/pba->H0,2)*pvecback[pba->index_bg_rho_dr]; _set_source_(ppt->index_tp_theta_dr) = 3./4.*k*y[ppw->pv->index_pt_F0_dr+1]/f_dr; } /* theta_ur */ if (ppt->has_source_theta_ur == _TRUE_) { if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) _set_source_(ppt->index_tp_theta_ur) = y[ppw->pv->index_pt_theta_ur]; else _set_source_(ppt->index_tp_theta_ur) = ppw->rsa_theta_ur; } /* theta_ncdm1 */ if (ppt->has_source_theta_ncdm == _TRUE_) { for (index_type = ppt->index_tp_theta_ncdm1; index_type < ppt->index_tp_theta_ncdm1+pba->N_ncdm; index_type++) { _set_source_(index_type) = ppw->theta_ncdm[index_type - ppt->index_tp_theta_ncdm1]; } } } /** - for tensors */ if (_tensors_) { /** - --> compute quantities depending on approximation schemes */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { P = -(1./10.*y[ppw->pv->index_pt_delta_g] +2./7.*y[ppw->pv->index_pt_shear_g] +3./70.*y[ppw->pv->index_pt_delta_g+4] -3./5.*y[ppw->pv->index_pt_pol0_g] +6./7.*y[ppw->pv->index_pt_pol2_g] -3./70.*y[ppw->pv->index_pt_pol0_g+4]) /sqrt(6.); } else { P = 2./5.*_SQRT6_*y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; //TBC } } else { P = 0.; } /* tensor temperature */ if (ppt->has_source_t == 
_TRUE_) { _set_source_(ppt->index_tp_t2) = - y[ppw->pv->index_pt_gwdot] * pvecthermo[pth->index_th_exp_m_kappa] + pvecthermo[pth->index_th_g] * P; } /* tensor polarization */ if (ppt->has_source_p == _TRUE_) { /* Note that the correct formula for the polarization source should have a minus sign, as shown in Hu & White. We put a plus sign to comply with the 'historical convention' established in CMBFAST and CAMB. */ _set_source_(ppt->index_tp_p) = sqrt(6.) * pvecthermo[pth->index_th_g] * P; } } return _SUCCESS_; } /** * When testing the code or a cosmological model, it can be useful to * output perturbations at each step of integration (and not just the * delta's at each source sampling point, which is achieved simply by * asking for matter transfer functions). Then this function can be * passed to the generic_evolver routine. * * By default, instead of passing this function to generic_evolver, * one passes a null pointer. Then this function is just not used. * * @param tau Input: conformal time * @param y Input: vector of perturbations * @param dy Input: vector of its derivatives (already allocated) * @param parameters_and_workspace Input: fixed parameters (e.g. indices) * @param error_message Output: error message * */ int perturb_print_variables(double tau, double * y, double * dy, void * parameters_and_workspace, ErrorMsg error_message ) { struct perturb_parameters_and_workspace * pppaw; /** Summary: */ /** - define local variables */ double k; int index_md; //struct precision * ppr; struct background * pba; struct thermo * pth; struct perturbs * ppt; struct perturb_workspace * ppw; double * pvecback; double * pvecmetric; double delta_g,theta_g,shear_g,l4_g,pol0_g,pol1_g,pol2_g,pol4_g; double delta_b,theta_b; double delta_cdm=0.,theta_cdm=0.; double delta_dcdm=0.,theta_dcdm=0.; double delta_dr=0.,theta_dr=0.,shear_dr=0., f_dr=1.0; double delta_ur=0.,theta_ur=0.,shear_ur=0.,l4_ur=0.; double delta_scf=0., theta_scf=0., theta_phi_scf=0., y_phi_scf=0., omega_scf=0.; double delta0_scf=0., delta1_scf=0.; /** - ncdm sector begins */ int n_ncdm; double *delta_ncdm=NULL, *theta_ncdm=NULL, *shear_ncdm=NULL, *delta_p_over_delta_rho_ncdm=NULL; double rho_ncdm_bg, p_ncdm_bg, pseudo_p_ncdm, w_ncdm; double rho_delta_ncdm = 0.0; double rho_plus_p_theta_ncdm = 0.0; double rho_plus_p_shear_ncdm = 0.0; double delta_p_ncdm = 0.0; double factor = 0.0; double q,q2,epsilon; /** - ncdm sector ends */ double phi=0.,psi=0.,alpha=0.; double delta_temp=0., delta_chi=0.; double a,a2,H; int idx,index_q, storeidx; double *dataptr; /** - rename structure fields (just to avoid heavy notations) */ pppaw = parameters_and_workspace; k = pppaw->k; index_md = pppaw->index_md; //ppr = pppaw->ppr; pba = pppaw->pba; pth = pppaw->pth; ppt = pppaw->ppt; ppw = pppaw->ppw; pvecback = ppw->pvecback; pvecmetric = ppw->pvecmetric; a = pvecback[pba->index_bg_a]; a2 = a*a; H = pvecback[pba->index_bg_H]; if (pba->has_ncdm == _TRUE_){ class_alloc(delta_ncdm, sizeof(double)*pba->N_ncdm,error_message); class_alloc(theta_ncdm, sizeof(double)*pba->N_ncdm,error_message); class_alloc(shear_ncdm, sizeof(double)*pba->N_ncdm,error_message); class_alloc(delta_p_over_delta_rho_ncdm, sizeof(double)*pba->N_ncdm,error_message); } /** - calculate perturbed recombination */ if ((ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){ delta_temp = y[ppw->pv->index_pt_perturbed_recombination_delta_temp]; delta_chi =y[ppw->pv->index_pt_perturbed_recombination_delta_chi]; } /** - for scalar modes */ if (_scalars_) 
{ if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) { delta_g = y[ppw->pv->index_pt_delta_g]; theta_g = y[ppw->pv->index_pt_theta_g]; } else { delta_g = ppw->rsa_delta_g; theta_g = ppw->rsa_theta_g; } if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) { if (ppw->approx[ppw->index_ap_tca]==(int)tca_on) { shear_g = ppw->tca_shear_g; //l3_g = 6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g; pol0_g = 2.5*ppw->tca_shear_g; pol1_g = 7./12.*6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g; pol2_g = 0.5*ppw->tca_shear_g; //pol3_g = 0.25*6./7.*k/ppw->pvecthermo[pth->index_th_dkappa]*ppw->tca_shear_g; } else { shear_g = y[ppw->pv->index_pt_shear_g]; //l3_g = y[ppw->pv->index_pt_l3_g]; pol0_g = y[ppw->pv->index_pt_pol0_g]; pol1_g = y[ppw->pv->index_pt_pol1_g]; pol2_g = y[ppw->pv->index_pt_pol2_g]; //pol3_g = y[ppw->pv->index_pt_pol3_g]; } } else { shear_g = 0; //l3_g = 0; pol0_g = 0; pol1_g = 0; pol2_g = 0; //pol3_g = 0.; } if (pba->has_ur == _TRUE_) { if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) { delta_ur = y[ppw->pv->index_pt_delta_ur]; theta_ur = y[ppw->pv->index_pt_theta_ur]; shear_ur = y[ppw->pv->index_pt_shear_ur]; } else { delta_ur = ppw->rsa_delta_ur; theta_ur = ppw->rsa_theta_ur; shear_ur = 0.; } } delta_b = y[ppw->pv->index_pt_delta_b]; theta_b = y[ppw->pv->index_pt_theta_b]; if (pba->has_cdm == _TRUE_) { delta_cdm = y[ppw->pv->index_pt_delta_cdm]; if (ppt->gauge == synchronous) { theta_cdm = 0.; } else { theta_cdm = y[ppw->pv->index_pt_theta_cdm]; } } /* gravitational potentials */ if (ppt->gauge == synchronous) { alpha = pvecmetric[ppw->index_mt_alpha]; psi = pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a] * alpha + pvecmetric[ppw->index_mt_alpha_prime]; phi = y[ppw->pv->index_pt_eta] - pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha; } else if (ppt->gauge == newtonian){ psi = pvecmetric[ppw->index_mt_psi]; phi = y[ppw->pv->index_pt_phi]; } else{ psi = 0.0; phi = 0.0; } if (pba->has_ncdm == _TRUE_) { /** - --> Get delta, deltaP/rho, theta, shear and store in array */ idx = ppw->pv->index_pt_psi0_ncdm1; if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on){ // The perturbations are evolved integrated: for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ rho_ncdm_bg = pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; p_ncdm_bg = pvecback[pba->index_bg_p_ncdm1+n_ncdm]; pseudo_p_ncdm = pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm]; w_ncdm = p_ncdm_bg/rho_ncdm_bg; delta_ncdm[n_ncdm] = y[idx]; theta_ncdm[n_ncdm] = y[idx+1]; shear_ncdm[n_ncdm] = y[idx+2]; //This is the adiabatic sound speed: delta_p_over_delta_rho_ncdm[n_ncdm] = w_ncdm*(1.0-1.0/(3.0+3.0*w_ncdm)*(3.0*w_ncdm-2.0+pseudo_p_ncdm/p_ncdm_bg)); idx += ppw->pv->l_max_ncdm[n_ncdm]+1; } } else{ // We must integrate to find perturbations: for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ rho_delta_ncdm = 0.0; rho_plus_p_theta_ncdm = 0.0; rho_plus_p_shear_ncdm = 0.0; delta_p_ncdm = 0.0; factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4); for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) { q = pba->q_ncdm[n_ncdm][index_q]; q2 = q*q; epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2); rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx]; rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1]; rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2]; delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx]; //Jump to next momentum bin: idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1); } rho_delta_ncdm *= factor; rho_plus_p_theta_ncdm *= 
k*factor; rho_plus_p_shear_ncdm *= 2.0/3.0*factor; delta_p_ncdm *= factor/3.; delta_ncdm[n_ncdm] = rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; theta_ncdm[n_ncdm] = rho_plus_p_theta_ncdm/ (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]); shear_ncdm[n_ncdm] = rho_plus_p_shear_ncdm/ (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]); delta_p_over_delta_rho_ncdm[n_ncdm] = delta_p_ncdm/rho_delta_ncdm; if (delta_p_over_delta_rho_ncdm[n_ncdm] < -0.5){ FILE * fid = fopen("integrand.dat","w"); fclose(fid); } } } } if (pba->has_dcdm == _TRUE_) { delta_dcdm = y[ppw->pv->index_pt_delta_dcdm]; theta_dcdm = y[ppw->pv->index_pt_theta_dcdm]; } if (pba->has_dr == _TRUE_) { f_dr = pow(pvecback[pba->index_bg_a]*pvecback[pba->index_bg_a]/pba->H0,2)*pvecback[pba->index_bg_rho_dr]; delta_dr = y[ppw->pv->index_pt_F0_dr]/f_dr; theta_dr = y[ppw->pv->index_pt_F0_dr+1]*3./4.*k/f_dr; shear_dr = y[ppw->pv->index_pt_F0_dr+2]*0.5/f_dr; } if (pba->has_scf == _TRUE_){ if (ppt->gauge == synchronous){ /** Use of auxiliar variables */ theta_phi_scf = pvecback[pba->index_bg_theta_phi_scf]; y_phi_scf = pvecback[pba->index_bg_y_phi_scf]; omega_scf = y[ppw->pv->index_pt_omega_scf]; delta0_scf = y[ppw->pv->index_pt_delta0_scf]; delta1_scf = y[ppw->pv->index_pt_delta1_scf]; delta_scf = delta0_scf; /** Approximate expression for the velocity gradient alone. Just for the output, no consequences for the integration of the equations of motion. */ theta_scf = k*k*(delta1_scf*(1.-cos_scf(pba,theta_phi_scf))-delta0_scf*sin_scf(pba,theta_phi_scf))/(a*H*y_phi_scf); } else{ delta_scf = delta0_scf; /*1./3.* (1./a2*ppw->pvecback[pba->index_bg_phi_prime_scf]*y[ppw->pv->index_pt_phi_prime_scf] + ppw->pvecback[pba->index_bg_dV_scf]*y[ppw->pv->index_pt_phi_scf] - 1./a2*pow(ppw->pvecback[pba->index_bg_phi_prime_scf],2)*ppw->pvecmetric[ppw->index_mt_psi]); */ } } /* converting synchronous variables to newtonian ones */ //if (ppt->gauge == synchronous) { /* density and velocity perturbations (comment out if you wish to keep synchronous variables) */ /*delta_g -= 4. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha; theta_g += k*k*alpha; delta_b -= 3. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha; theta_b += k*k*alpha; if (pba->has_ur == _TRUE_) { delta_ur -= 4. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha; theta_ur += k*k*alpha; } if (pba->has_dr == _TRUE_) { delta_dr += (-4.*a*H+a*pba->Gamma_dcdm*pvecback[pba->index_bg_rho_dcdm]/pvecback[pba->index_bg_rho_dr])*alpha; theta_dr += k*k*alpha; } if (pba->has_cdm == _TRUE_) { delta_cdm -= 3. * pvecback[pba->index_bg_H]*pvecback[pba->index_bg_a]*alpha; theta_cdm += k*k*alpha; } if (pba->has_ncdm == _TRUE_) { for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){*/ /** - --> Do gauge transformation of delta, deltaP/rho (?) and theta using -= 3aH(1+w_ncdm) alpha for delta. 
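The corresponding velocity transformation would be theta_ncdm += k*k*alpha, as for the other species above; the whole conversion block is left commented out, so the stored variables keep the gauge used for the integration.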
*/ //} //} /*if (pba->has_dcdm == _TRUE_) { delta_dcdm += alpha*(-a*pba->Gamma_dcdm-3.*a*H); theta_dcdm += k*k*alpha; } if (pba->has_scf == _TRUE_) { delta_scf += alpha*(-3.0*H*(1.0+pvecback[pba->index_bg_p_scf]/pvecback[pba->index_bg_rho_scf])); theta_scf += k*k*alpha; } }*/ // fprintf(ppw->perturb_output_file," "); /** - --> Handle (re-)allocation */ if (ppt->scalar_perturbations_data[ppw->index_ikout] == NULL){ class_alloc(ppt->scalar_perturbations_data[ppw->index_ikout], sizeof(double)*ppt->number_of_scalar_titles, error_message); ppt->size_scalar_perturbation_data[ppw->index_ikout] = 0; } else{ ppt->scalar_perturbations_data[ppw->index_ikout] = realloc(ppt->scalar_perturbations_data[ppw->index_ikout], sizeof(double)*(ppt->size_scalar_perturbation_data[ppw->index_ikout]+ppt->number_of_scalar_titles)); } storeidx = 0; dataptr = ppt->scalar_perturbations_data[ppw->index_ikout]+ ppt->size_scalar_perturbation_data[ppw->index_ikout]; ppt->size_scalar_perturbation_data[ppw->index_ikout] += ppt->number_of_scalar_titles; class_store_double(dataptr, tau, _TRUE_, storeidx); class_store_double(dataptr, pvecback[pba->index_bg_a], _TRUE_, storeidx); class_store_double(dataptr, delta_g, _TRUE_, storeidx); class_store_double(dataptr, theta_g, _TRUE_, storeidx); class_store_double(dataptr, shear_g, _TRUE_, storeidx); class_store_double(dataptr, pol0_g, _TRUE_, storeidx); class_store_double(dataptr, pol1_g, _TRUE_, storeidx); class_store_double(dataptr, pol2_g, _TRUE_, storeidx); class_store_double(dataptr, delta_b, _TRUE_, storeidx); class_store_double(dataptr, theta_b, _TRUE_, storeidx); class_store_double(dataptr, psi, _TRUE_, storeidx); class_store_double(dataptr, phi, _TRUE_, storeidx); /* perturbed recombination */ class_store_double(dataptr, delta_temp, ppt->has_perturbed_recombination, storeidx); class_store_double(dataptr, delta_chi, ppt->has_perturbed_recombination, storeidx); /* Ultra relativistic species */ class_store_double(dataptr, delta_ur, pba->has_ur, storeidx); class_store_double(dataptr, theta_ur, pba->has_ur, storeidx); class_store_double(dataptr, shear_ur, pba->has_ur, storeidx); /* Cold dark matter */ class_store_double(dataptr, delta_cdm, pba->has_cdm, storeidx); class_store_double(dataptr, theta_cdm, pba->has_cdm, storeidx); /* Non-cold Dark Matter */ if ((pba->has_ncdm == _TRUE_) && ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_) || (ppt->has_source_delta_m == _TRUE_))) { for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ class_store_double(dataptr, delta_ncdm[n_ncdm], _TRUE_, storeidx); class_store_double(dataptr, theta_ncdm[n_ncdm], _TRUE_, storeidx); class_store_double(dataptr, shear_ncdm[n_ncdm], _TRUE_, storeidx); class_store_double(dataptr, delta_p_over_delta_rho_ncdm[n_ncdm], _TRUE_, storeidx); } } /* Decaying cold dark matter */ class_store_double(dataptr, delta_dcdm, pba->has_dcdm, storeidx); class_store_double(dataptr, theta_dcdm, pba->has_dcdm, storeidx); /* Decay radiation */ class_store_double(dataptr, delta_dr, pba->has_dr, storeidx); class_store_double(dataptr, theta_dr, pba->has_dr, storeidx); class_store_double(dataptr, shear_dr, pba->has_dr, storeidx); /* Scalar field scf*/ class_store_double(dataptr, delta_scf, pba->has_scf, storeidx); class_store_double(dataptr, theta_scf, pba->has_scf, storeidx); class_store_double(dataptr, omega_scf, pba->has_scf, storeidx); class_store_double(dataptr, delta0_scf, pba->has_scf, storeidx); class_store_double(dataptr, delta1_scf, pba->has_scf, storeidx); 
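/* Note on the storage pattern above: each call to this output routine appends one row of
   ppt->number_of_scalar_titles doubles for the current tau.  The buffer is class_alloc'ed on the
   first call and realloc'ed by one extra row on every later call, and class_store_double only
   writes dataptr[storeidx] and advances storeidx when its condition flag is _TRUE_, so the stored
   columns stay aligned with the title list. */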
//fprintf(ppw->perturb_output_file,"\n"); } /** - for tensor modes: */ if (_tensors_) { if (ppw->approx[ppw->index_ap_rsa]==(int)rsa_off) { if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) { delta_g = y[ppw->pv->index_pt_delta_g]; shear_g = y[ppw->pv->index_pt_shear_g]; l4_g = y[ppw->pv->index_pt_delta_g+4]; pol0_g = y[ppw->pv->index_pt_pol0_g]; pol2_g = y[ppw->pv->index_pt_pol2_g]; pol4_g = y[ppw->pv->index_pt_pol0_g+4]; } else { delta_g = -4./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; //TBC shear_g = 0.; l4_g = 0.; pol0_g = 1./3.*ppw->pv->y[ppw->pv->index_pt_gwdot]/ppw->pvecthermo[pth->index_th_dkappa]; //TBC pol2_g = 0.; pol4_g = 0.; } } else { delta_g = 0.; shear_g = 0.; l4_g = 0.; pol0_g = 0.; pol2_g = 0.; pol4_g = 0.; } if (ppt->evolve_tensor_ur == _TRUE_){ delta_ur = y[ppw->pv->index_pt_delta_ur]; shear_ur = y[ppw->pv->index_pt_shear_ur]; l4_ur = y[ppw->pv->index_pt_delta_ur+4]; } /** - --> Handle (re-)allocation */ if (ppt->tensor_perturbations_data[ppw->index_ikout] == NULL){ class_alloc(ppt->tensor_perturbations_data[ppw->index_ikout], sizeof(double)*ppt->number_of_tensor_titles, error_message); ppt->size_tensor_perturbation_data[ppw->index_ikout] = 0; } else{ ppt->tensor_perturbations_data[ppw->index_ikout] = realloc(ppt->tensor_perturbations_data[ppw->index_ikout], sizeof(double)*(ppt->size_tensor_perturbation_data[ppw->index_ikout]+ppt->number_of_tensor_titles)); } storeidx = 0; dataptr = ppt->tensor_perturbations_data[ppw->index_ikout]+ ppt->size_tensor_perturbation_data[ppw->index_ikout]; ppt->size_tensor_perturbation_data[ppw->index_ikout] += ppt->number_of_tensor_titles; //fprintf(ppw->perturb_output_file," "); class_store_double(dataptr, tau, _TRUE_, storeidx); class_store_double(dataptr, pvecback[pba->index_bg_a], _TRUE_, storeidx); class_store_double(dataptr, delta_g, _TRUE_, storeidx); class_store_double(dataptr, shear_g, _TRUE_, storeidx); class_store_double(dataptr, l4_g, _TRUE_, storeidx); class_store_double(dataptr, pol0_g, _TRUE_, storeidx); class_store_double(dataptr, pol2_g, _TRUE_, storeidx); class_store_double(dataptr, pol4_g, _TRUE_, storeidx); class_store_double(dataptr, y[ppw->pv->index_pt_gw], _TRUE_, storeidx); class_store_double(dataptr, y[ppw->pv->index_pt_gwdot], _TRUE_, storeidx); class_store_double(dataptr, delta_ur, ppt->evolve_tensor_ur, storeidx); class_store_double(dataptr, shear_ur, ppt->evolve_tensor_ur, storeidx); class_store_double(dataptr, l4_ur, ppt->evolve_tensor_ur, storeidx); //printf("index_pt_delta+ur = %d\n",ppw->pv->index_pt_delta_ur); /* Non-cold Dark Matter */ if (ppt->evolve_tensor_ncdm == _TRUE_) { idx = ppw->pv->index_pt_psi0_ncdm1; for(n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++){ rho_delta_ncdm = 0.0; rho_plus_p_theta_ncdm = 0.0; rho_plus_p_shear_ncdm = 0.0; delta_p_ncdm = 0.0; factor = pba->factor_ncdm[n_ncdm]*pow(pba->a_today/a,4); for (index_q=0; index_q < ppw->pv->q_size_ncdm[n_ncdm]; index_q ++) { q = pba->q_ncdm[n_ncdm][index_q]; q2 = q*q; epsilon = sqrt(q2+pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]*a2); rho_delta_ncdm += q2*epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx]; rho_plus_p_theta_ncdm += q2*q*pba->w_ncdm[n_ncdm][index_q]*y[idx+1]; rho_plus_p_shear_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx+2]; delta_p_ncdm += q2*q2/epsilon*pba->w_ncdm[n_ncdm][index_q]*y[idx]; //Jump to next momentum bin: idx+=(ppw->pv->l_max_ncdm[n_ncdm]+1); } rho_delta_ncdm *= factor; rho_plus_p_theta_ncdm *= k*factor; rho_plus_p_shear_ncdm *= 2.0/3.0*factor; delta_p_ncdm *= factor/3.; delta_ncdm[n_ncdm] 
= rho_delta_ncdm/ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; theta_ncdm[n_ncdm] = rho_plus_p_theta_ncdm/ (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]); shear_ncdm[n_ncdm] = rho_plus_p_shear_ncdm/ (ppw->pvecback[pba->index_bg_rho_ncdm1+n_ncdm]+ppw->pvecback[pba->index_bg_p_ncdm1+n_ncdm]); class_store_double(dataptr, delta_ncdm[n_ncdm], _TRUE_, storeidx); class_store_double(dataptr, theta_ncdm[n_ncdm], _TRUE_, storeidx); class_store_double(dataptr, shear_ncdm[n_ncdm], _TRUE_, storeidx); } } // fprintf(ppw->perturb_output_file,"\n"); } if (pba->has_ncdm == _TRUE_){ free(delta_ncdm); free(theta_ncdm); free(shear_ncdm); free(delta_p_over_delta_rho_ncdm); } return _SUCCESS_; } /** * Compute derivative of all perturbations to be integrated * * For each mode (scalar/vector/tensor) and each wavenumber k, this * function computes the derivative of all values in the vector of * perturbed variables to be integrated. * * This is one of the few functions in the code which is passed to the generic_integrator() routine. * Since generic_integrator() should work with functions passed from various modules, the format of the arguments * is a bit special: * - fixed parameters and workspaces are passed through a generic pointer. * generic_integrator() doesn't know what the content of this pointer is. * - errors are not written as usual in pth->error_message, but in a generic * error_message passed in the list of arguments. * * @param tau Input: conformal time * @param y Input: vector of perturbations * @param dy Output: vector of its derivatives (already allocated) * @param parameters_and_workspace Input/Output: in input, fixed parameters (e.g. indices); in output, background and thermo quantities evaluated at tau. * @param error_message Output: error message */ int perturb_derivs(double tau, double * y, double * dy, void * parameters_and_workspace, ErrorMsg error_message ) { /** Summary: */ /** - define local variables */ /* multipole */ int l; /* scale factor and other background quantities */ double a,a2,a_prime_over_a,R; /* short-cut names for the fields of the input structure */ struct perturb_parameters_and_workspace * pppaw; double k,k2; int index_md; struct precision * ppr; struct background * pba; struct thermo * pth; struct perturbs * ppt; struct perturb_workspace * ppw; double * pvecback; double * pvecthermo; double * pvecmetric; double * s_l; struct perturb_vector * pv; /* short-cut notations for the perturbations */ double delta_g=0.,theta_g=0.,shear_g=0.; double delta_b,theta_b; double cb2,cs2,ca2; double metric_continuity=0.,metric_euler=0.,metric_shear=0.,metric_ufa_class=0.; /* perturbed recombination (just to simplify the notation) */ double H0=0.,Nnow=0.,n_H=0.,fHe=0.; double delta_temp=0.,delta_chi=0., chi=0.; double alpha_rec=0.,delta_alpha_rec=0.; double a_rad=0., Compton_CR =0.; double Tb_in_K=0.; /* Non-metric source terms for photons, i.e. 
\mathcal{P}^{(m)} from arXiv:1305.3261 */ double P0,P1,P2; /* for use with fluid (fld): */ double w,w_prime; /* for use with non-cold dark matter (ncdm): */ int index_q,n_ncdm,idx; double q,epsilon,dlnf0_dlnq,qk_div_epsilon; double rho_ncdm_bg,p_ncdm_bg,pseudo_p_ncdm,w_ncdm,ca2_ncdm,ceff2_ncdm=0.,cvis2_ncdm=0.; /* for use with curvature */ double cotKgen, sqrt_absK; double s2_squared, ssqrt3; /* for use with dcdm and dr */ double f_dr, fprime_dr; /* for use with scalar field */ double Omega_phi_scf,theta_phi_scf, y1_phi_scf, omega_scf, delta0_scf, delta1_scf; /** - rename the fields of the input structure (just to avoid heavy notations) */ pppaw = parameters_and_workspace; k = pppaw->k; k2=k*k; index_md = pppaw->index_md; ppr = pppaw->ppr; pba = pppaw->pba; pth = pppaw->pth; ppt = pppaw->ppt; ppw = pppaw->ppw; s_l = ppw->s_l; pvecback = ppw->pvecback; pvecthermo = ppw->pvecthermo; pvecmetric = ppw->pvecmetric; pv = ppw->pv; /** - get background/thermo quantities in this point */ class_call(background_at_tau(pba, tau, pba->normal_info, pba->inter_closeby, &(ppw->last_index_back), pvecback), pba->error_message, error_message); class_call(thermodynamics_at_z(pba, pth, 1./pvecback[pba->index_bg_a]-1., /* redshift z=1/a-1 */ pth->inter_closeby, &(ppw->last_index_thermo), pvecback, pvecthermo), pth->error_message, error_message); /** - get metric perturbations with perturb_einstein() */ class_call(perturb_einstein(ppr, pba, pth, ppt, index_md, k, tau, y, ppw), ppt->error_message, error_message); /** - compute related background quantities */ a = pvecback[pba->index_bg_a]; a2 = a*a; a_prime_over_a = pvecback[pba->index_bg_H] * a; R = 4./3. * pvecback[pba->index_bg_rho_g]/pvecback[pba->index_bg_rho_b]; /** - Compute 'generalised cotK function of argument \f$ \sqrt{|K|}*\tau \f$, for closing hierarchy. (see equation 2.34 in arXiv:1305.3261): */ if (pba->has_curvature == _FALSE_){ cotKgen = 1.0/(k*tau); } else{ sqrt_absK = sqrt(fabs(pba->K)); if (pba->K < 0) cotKgen = sqrt_absK/k/tanh(sqrt_absK*tau); else cotKgen = sqrt_absK/k/tan(sqrt_absK*tau); } s2_squared = 1.-3.*pba->K/k2; /** - for scalar modes: */ if (_scalars_) { /** - --> (a) define short-cut notations for the scalar perturbations */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { delta_g = y[pv->index_pt_delta_g]; theta_g = y[pv->index_pt_theta_g]; } delta_b = y[pv->index_pt_delta_b]; theta_b = y[pv->index_pt_theta_b]; cb2 = pvecthermo[pth->index_th_cb2]; /** - --> (b) perturbed recombination **/ if ((ppt->has_perturbed_recombination == _TRUE_)&&(ppw->approx[ppw->index_ap_tca]==(int)tca_off)){ delta_temp= y[ppw->pv->index_pt_perturbed_recombination_delta_temp]; delta_chi= y[ppw->pv->index_pt_perturbed_recombination_delta_chi]; chi=pvecthermo[pth->index_th_xe]; // Conversion of H0 in inverse seconds (pba->H0 is [H0/c] in inverse Mpcs) H0 = pba->H0 * _c_ / _Mpc_over_m_; //Computation of Nnow in SI units Nnow = 3.*H0*H0*pba->Omega0_b*(1.-pth->YHe)/(8.*_PI_*_G_*_m_H_); // total amount of hydrogen today n_H = (pba->a_today/a)*(pba->a_today/a)*(pba->a_today/a)* Nnow; // Helium-to-hydrogen ratio fHe = pth->YHe / (_not4_*(1-pth->YHe)); // The constant such that rho_gamma = a_rad * T^4 a_rad = 8./15.*pow(_PI_,5)*pow(_k_B_,4)/pow(_c_*_h_P_,3); // Compton cooling rate in Mpc^(-1) Compton_CR = 8./3. *_sigma_ * a_rad /(_m_e_ * _c_ *_c_) *_Mpc_over_m_ ; // Temperature is already in Kelvin Tb_in_K = pvecthermo[pth->index_th_Tb]; // Alpha in m^3/s, cf. 
Recfast paper alpha_rec = 1.14 * 4.309e-19*pow((Tb_in_K * 1e-4),-0.6166)/(1+0.6703*pow((Tb_in_K * 1e-4),0.53)) ; // delta alpha, dimensionless delta_alpha_rec= (-0.6166 + 0.6703 * pow((Tb_in_K * 1e-4),0.53)*(-0.6166-0.53))/(1+0.6703*pow((Tb_in_K * 1e-4),0.53)) * delta_temp; } // end of perturbed recombination related quantities /** - --> (c) compute metric-related quantities (depending on gauge; additional gauges can be coded below) - Each continuity equation contains a term in (theta+metric_continuity) with metric_continuity = (h_prime/2) in synchronous gauge, (-3 phi_prime) in newtonian gauge - Each Euler equation contains a source term metric_euler with metric_euler = 0 in synchronous gauge, (k2 psi) in newtonian gauge - Each shear derivative equation contains a source term metric_shear equal to metric_shear = (h_prime+6eta_prime)/2 in synchronous gauge, 0 in newtonian gauge - metric_shear_prime is the derivative of metric_shear - In the ufa_class approximation, the leading-order source term is (h_prime/2) in synchronous gauge, (-3 (phi_prime+psi_prime)) in newtonian gauge: we approximate the later by (-6 phi_prime) */ if (ppt->gauge == synchronous) { metric_continuity = pvecmetric[ppw->index_mt_h_prime]/2.; metric_euler = 0.; metric_shear = k2 * pvecmetric[ppw->index_mt_alpha]; //metric_shear_prime = k2 * pvecmetric[ppw->index_mt_alpha_prime]; metric_ufa_class = pvecmetric[ppw->index_mt_h_prime]/2.; } if (ppt->gauge == newtonian) { metric_continuity = -3.*pvecmetric[ppw->index_mt_phi_prime]; metric_euler = k2*pvecmetric[ppw->index_mt_psi]; metric_shear = 0.; //metric_shear_prime = 0.; metric_ufa_class = -6.*pvecmetric[ppw->index_mt_phi_prime]; } /** - --> (d) if some approximation schemes are turned on, enforce a few y[] values computed in perturb_einstein */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) { delta_g = ppw->rsa_delta_g; theta_g = ppw->rsa_theta_g; } /** - --> (e) BEGINNING OF ACTUAL SYSTEM OF EQUATIONS OF EVOLUTION */ /* Note concerning perturbed recombination: $cb2*delta_b$ must be replaced everywhere by $cb2*(delta_b+delta_temp)$. If perturbed recombination is not required, delta_temp is equal to zero. 
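The substitution is used below in the baryon Euler equation (with and without tca), in the tight-coupling photon velocity, and again in the slip formulas of perturb_tca_slip_and_shear().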
*/ /** - ---> photon temperature density */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { dy[pv->index_pt_delta_g] = -4./3.*(theta_g+metric_continuity); } /** - ---> baryon density */ dy[pv->index_pt_delta_b] = -(theta_b+metric_continuity); /** - ---> baryon velocity (depends on tight-coupling approximation=tca) */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /* without tca */ /** - ----> perturbed recombination has an impact **/ dy[pv->index_pt_theta_b] = - a_prime_over_a*theta_b + metric_euler + k2*cb2*(delta_b+delta_temp) + R*pvecthermo[pth->index_th_dkappa]*(theta_g-theta_b); } else { /* with tca */ class_call(perturb_tca_slip_and_shear(y,pppaw,error_message), error_message, error_message); /* perturbed recombination has an impact **/ dy[pv->index_pt_theta_b] = (-a_prime_over_a*theta_b +k2*(cb2*(delta_b+delta_temp)+R*(delta_g/4.-s2_squared*ppw->tca_shear_g)) +R*ppw->tca_slip)/(1.+R) +metric_euler; } /** - ---> photon temperature higher momenta and photon polarization (depend on tight-coupling approximation) */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /** - ----> if photon tight-coupling is off */ if (ppw->approx[ppw->index_ap_tca] == (int)tca_off) { /** - -----> define \f$ \Pi = G_{\gamma 0} + G_{\gamma 2} + F_{\gamma 2} \f$ */ P0 = (y[pv->index_pt_pol0_g] + y[pv->index_pt_pol2_g] + 2.*s_l[2]*y[pv->index_pt_shear_g])/8.; /** - -----> photon temperature velocity */ dy[pv->index_pt_theta_g] = k2*(delta_g/4.-s2_squared*y[pv->index_pt_shear_g]) + metric_euler + pvecthermo[pth->index_th_dkappa]*(theta_b-theta_g); /** - -----> photon temperature shear */ dy[pv->index_pt_shear_g] = 0.5*(8./15.*(theta_g+metric_shear) -3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_l3_g] -pvecthermo[pth->index_th_dkappa]*(2.*y[pv->index_pt_shear_g]-4./5./s_l[2]*P0)); /** - -----> photon temperature l=3 */ l = 3; dy[pv->index_pt_l3_g] = k/(2.0*l+1.0)* (l*s_l[l]*2.*s_l[2]*y[pv->index_pt_shear_g]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_g+1]) - pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g]; /** - -----> photon temperature l>3 */ for (l = 4; l < pv->l_max_g; l++) { dy[pv->index_pt_delta_g+l] = k/(2.0*l+1.0)* (l*s_l[l]*y[pv->index_pt_delta_g+l-1]-(l+1)*s_l[l+1]*y[pv->index_pt_delta_g+l+1]) - pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l]; } /** - -----> photon temperature lmax */ l = pv->l_max_g; /* l=lmax */ dy[pv->index_pt_delta_g+l] = k*(s_l[l]*y[pv->index_pt_delta_g+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_g+l]) - pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l]; /** - -----> photon polarization l=0 */ dy[pv->index_pt_pol0_g] = -k*y[pv->index_pt_pol0_g+1] -pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-4.*P0); /** - -----> photon polarization l=1 */ dy[pv->index_pt_pol1_g] = k/3.*(y[pv->index_pt_pol1_g-1]-2.*s_l[2]*y[pv->index_pt_pol1_g+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol1_g]; /** - -----> photon polarization l=2 */ dy[pv->index_pt_pol2_g] = k/5.*(2.*s_l[2]*y[pv->index_pt_pol2_g-1]-3.*s_l[3]*y[pv->index_pt_pol2_g+1]) -pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol2_g]-4./5.*P0); /** - -----> photon polarization l>2 */ for (l=3; l < pv->l_max_pol_g; l++) dy[pv->index_pt_pol0_g+l] = k/(2.*l+1)* (l*s_l[l]*y[pv->index_pt_pol0_g+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l]; /** - -----> photon polarization lmax_pol */ l = pv->l_max_pol_g; dy[pv->index_pt_pol0_g+l] = k*(s_l[l]*y[pv->index_pt_pol0_g+l-1]-(l+1)*cotKgen*y[pv->index_pt_pol0_g+l]) 
-pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l]; } /** - ----> if photon tight-coupling is on: */ else { /** - -----> in that case, only need photon velocity */ /* perturbed recombination has an impact **/ dy[pv->index_pt_theta_g] = -(dy[pv->index_pt_theta_b]+a_prime_over_a*theta_b-cb2*k2*(delta_b+delta_temp))/R +k2*(0.25*delta_g-s2_squared*ppw->tca_shear_g)+(1.+R)/R*metric_euler; } } /** - ---> cdm */ if (pba->has_cdm == _TRUE_) { /** - ----> newtonian gauge: cdm density and velocity */ if (ppt->gauge == newtonian) { dy[pv->index_pt_delta_cdm] = -(y[pv->index_pt_theta_cdm]+metric_continuity); /* cdm density */ dy[pv->index_pt_theta_cdm] = - a_prime_over_a*y[pv->index_pt_theta_cdm] + metric_euler; /* cdm velocity */ } /** - ----> synchronous gauge: cdm density only (velocity set to zero by definition of the gauge) */ if (ppt->gauge == synchronous) { dy[pv->index_pt_delta_cdm] = -metric_continuity; /* cdm density */ } } /* perturbed recombination */ /* computes the derivatives of delta x_e and delta T_b */ if((ppt->has_perturbed_recombination == _TRUE_)&&(ppw->approx[ppw->index_ap_tca] == (int)tca_off)){ // alpha * n_H is in inverse seconds, so we have to multiply it by Mpc_in_sec dy[ppw->pv->index_pt_perturbed_recombination_delta_chi] = - alpha_rec* a * chi*n_H *(delta_alpha_rec + delta_chi + delta_b) * _Mpc_over_m_ / _c_ ; // see the documentation for this formula dy[ppw->pv->index_pt_perturbed_recombination_delta_temp] = 2./3. * dy[ppw->pv->index_pt_delta_b] - a * Compton_CR * pow(pba->T_cmb/a, 4) * chi / (1.+chi+fHe) * ( (1.-pba->T_cmb*pba->a_today/a/pvecthermo[pth->index_th_Tb])*(delta_g + delta_chi*(1.+fHe)/(1.+chi+fHe)) + pba->T_cmb*pba->a_today/a/pvecthermo[pth->index_th_Tb] *(delta_temp - 1./4. * delta_g) ); } /** - ---> dcdm and dr */ if (pba->has_dcdm == _TRUE_) { /** - ----> dcdm */ dy[pv->index_pt_delta_dcdm] = -(y[pv->index_pt_theta_dcdm]+metric_continuity) - a * pba->Gamma_dcdm / k2 * metric_euler; /* dcdm density */ dy[pv->index_pt_theta_dcdm] = - a_prime_over_a*y[pv->index_pt_theta_dcdm] + metric_euler; /* dcdm velocity */ } /** - ---> dr */ if ((pba->has_dcdm == _TRUE_)&&(pba->has_dr == _TRUE_)) { /* f = rho_dr*a^4/rho_crit_today. In CLASS density units rho_crit_today = H0^2. 
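Hence f_dr = (a/a_today)^4 * rho_dr / H0^2 below; its conformal-time derivative follows from the dr background equation (rho_dr*a^4)' = a^5*Gamma_dcdm*rho_dcdm, which (with a_today=1) gives fprime_dr = Gamma_dcdm*rho_dcdm*a^5/H0^2 as used here.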
*/ f_dr = pow(pow(a/pba->a_today,2)/pba->H0,2)*pvecback[pba->index_bg_rho_dr]; fprime_dr = pba->Gamma_dcdm*pvecback[pba->index_bg_rho_dcdm]*pow(a,5)/pow(pba->H0,2); /** - ----> dr F0 */ dy[pv->index_pt_F0_dr] = -k*y[pv->index_pt_F0_dr+1]-4./3.*metric_continuity*f_dr+ fprime_dr*(y[pv->index_pt_delta_dcdm]+metric_euler/k2); /** - ----> dr F1 */ dy[pv->index_pt_F0_dr+1] = k/3.*y[pv->index_pt_F0_dr]-2./3.*k*y[pv->index_pt_F0_dr+2]*s2_squared + 4*metric_euler/(3.*k)*f_dr + fprime_dr/k*y[pv->index_pt_theta_dcdm]; /** - ----> exact dr F2 */ dy[pv->index_pt_F0_dr+2] = 8./15.*(3./4.*k*y[pv->index_pt_F0_dr+1]+metric_shear*f_dr) -3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_F0_dr+3]; /** - ----> exact dr l=3 */ l = 3; dy[pv->index_pt_F0_dr+3] = k/(2.*l+1.)* (l*s_l[l]*s_l[2]*y[pv->index_pt_F0_dr+2]-(l+1.)*s_l[l+1]*y[pv->index_pt_F0_dr+4]); /** - ----> exact dr l>3 */ for (l = 4; l < pv->l_max_dr; l++) { dy[pv->index_pt_F0_dr+l] = k/(2.*l+1)* (l*s_l[l]*y[pv->index_pt_F0_dr+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_F0_dr+l+1]); } /** - ----> exact dr lmax_dr */ l = pv->l_max_dr; dy[pv->index_pt_F0_dr+l] = k*(s_l[l]*y[pv->index_pt_F0_dr+l-1]-(1.+l)*cotKgen*y[pv->index_pt_F0_dr+l]); } /** - ---> fluid (fld) */ if (pba->has_fld == _TRUE_) { /** - ----> factors w, w_prime, adiabatic sound speed ca2 (all three background-related), plus actual sound speed in the fluid rest frame cs2 */ w = pba->w0_fld + pba->wa_fld * (1. - a / pba->a_today); w_prime = - pba->wa_fld * a / pba->a_today * a_prime_over_a; ca2 = w - w_prime / 3. / (1.+w) / a_prime_over_a; cs2 = pba->cs2_fld; /** - ----> fluid density */ dy[pv->index_pt_delta_fld] = -(1+w)*(y[pv->index_pt_theta_fld]+metric_continuity) -3.*(cs2-w)*a_prime_over_a*y[pv->index_pt_delta_fld] -9.*(1+w)*(cs2-ca2)*a_prime_over_a*a_prime_over_a*y[pv->index_pt_theta_fld]/k2; /** - ----> fluid velocity */ dy[pv->index_pt_theta_fld] = /* fluid velocity */ -(1.-3.*cs2)*a_prime_over_a*y[pv->index_pt_theta_fld] +cs2*k2/(1.+w)*y[pv->index_pt_delta_fld] +metric_euler; } /** - ---> scalar field (scf) */ if (pba->has_scf == _TRUE_) { /** ---> Klein Gordon equation under new formalism */ /** Auxiliary variables */ Omega_phi_scf = pvecback[pba->index_bg_Omega_phi_scf]; theta_phi_scf = pvecback[pba->index_bg_theta_phi_scf]; y1_phi_scf = pvecback[pba->index_bg_y_phi_scf]; omega_scf = y[pv->index_pt_omega_scf]; delta0_scf = y[pv->index_pt_delta0_scf]; delta1_scf = y[pv->index_pt_delta1_scf]; /** ---> Equations of motion for omega */ dy[pv->index_pt_omega_scf] = a_prime_over_a*(1.5*pvecback[pba->index_bg_w_tot]-0.5-y2_phi_scf(pba,Omega_phi_scf,theta_phi_scf,y1_phi_scf)* exp(0.5*Omega_phi_scf)*sin_scf(pba,0.5*theta_phi_scf)/y1_phi_scf)*y[pv->index_pt_omega_scf]; /** ---> Equations of motion for the density contrasts */ dy[pv->index_pt_delta0_scf] = 0.; /*a_prime_over_a*((3.*sin(theta_phi_scf)+omega_scf*(1.-cos(theta_phi_scf)))*delta1_scf -omega_scf*sin(theta_phi_scf)*delta0_scf) -metric_continuity*(1.-cos(theta_phi_scf)); */ //metric_continuity = h'/2 dy[pv->index_pt_delta1_scf] = 0.;/*-a_prime_over_a*((3.*cos_scf(pba,theta_phi_scf)+omega_scf*sin_scf(pba,theta_phi_scf) -exp(0.5*Omega_phi_scf)*sin_scf(pba,0.5*theta_phi_scf)*y2_phi_scf(pba,Omega_phi_scf,theta_phi_scf,y1_phi_scf)/y1_phi_scf)*delta1_scf -omega_scf*(1.+cos_scf(pba,theta_phi_scf) +exp(0.5*Omega_phi_scf)*cos_scf(pba,0.5*theta_phi_scf)*y2_phi_scf(pba,Omega_phi_scf,theta_phi_scf,y1_phi_scf)/y1_phi_scf)*delta0_scf) -metric_continuity*sin_scf(pba,theta_phi_scf); */ //metric_continuity = h'/2 } /** - ---> ultra-relativistic neutrino/relics (ur) */ 
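/* The ur equations below generalise the standard massless-neutrino hierarchy: the continuity and
   Euler equations carry extra terms proportional to (1 - three_ceff2_ur), and the shear source is
   rescaled by three_cvis2_ur, so the standard case is recovered for ceff2_ur = cvis2_ur = 1/3.
   With the ur fluid approximation (ufa) on, the hierarchy is truncated at the shear, whose
   derivative is closed with one of three ansatzes (Ma & Bertschinger, Hu, or CLASS). */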
if (pba->has_ur == _TRUE_) { /** - ----> if radiation streaming approximation is off */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { /** - -----> ur density */ dy[pv->index_pt_delta_ur] = // standard term -4./3.*(y[pv->index_pt_theta_ur] + metric_continuity) // non-standard term, non-zero if if ceff2_ur not 1/3 +(1.-ppt->three_ceff2_ur)*a_prime_over_a*(y[pv->index_pt_delta_ur] + 4.*a_prime_over_a*y[pv->index_pt_theta_ur]/k/k); /** - -----> ur velocity */ dy[pv->index_pt_theta_ur] = // standard term with extra coefficient (3 ceff2_ur), normally equal to one k2*(ppt->three_ceff2_ur*y[pv->index_pt_delta_ur]/4.-s2_squared*y[pv->index_pt_shear_ur]) + metric_euler // non-standard term, non-zero if ceff2_ur not 1/3 -(1.-ppt->three_ceff2_ur)*a_prime_over_a*y[pv->index_pt_theta_ur]; if(ppw->approx[ppw->index_ap_ufa] == (int)ufa_off) { /** - -----> exact ur shear */ dy[pv->index_pt_shear_ur] = 0.5*( // standard term 8./15.*(y[pv->index_pt_theta_ur]+metric_shear)-3./5.*k*s_l[3]/s_l[2]*y[pv->index_pt_shear_ur+1] // non-standard term, non-zero if cvis2_ur not 1/3 -(1.-ppt->three_cvis2_ur)*(8./15.*(y[pv->index_pt_theta_ur]+metric_shear))); /** - -----> exact ur l=3 */ l = 3; dy[pv->index_pt_l3_ur] = k/(2.*l+1.)* (l*2.*s_l[l]*s_l[2]*y[pv->index_pt_shear_ur]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_ur+1]); /** - -----> exact ur l>3 */ for (l = 4; l < pv->l_max_ur; l++) { dy[pv->index_pt_delta_ur+l] = k/(2.*l+1)* (l*s_l[l]*y[pv->index_pt_delta_ur+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_ur+l+1]); } /** - -----> exact ur lmax_ur */ l = pv->l_max_ur; dy[pv->index_pt_delta_ur+l] = k*(s_l[l]*y[pv->index_pt_delta_ur+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_ur+l]); } else { /** - -----> in fluid approximation (ufa): only ur shear needed */ //TBC: curvature? /* a la Ma & Bertschinger */ if (ppr->ur_fluid_approximation == ufa_mb) { dy[pv->index_pt_shear_ur] = -3./tau*y[pv->index_pt_shear_ur] +2./3.*(y[pv->index_pt_theta_ur]+metric_shear); } /* a la Hu */ if (ppr->ur_fluid_approximation == ufa_hu) { dy[pv->index_pt_shear_ur] = -3.*a_prime_over_a*y[pv->index_pt_shear_ur] +2./3.*(y[pv->index_pt_theta_ur]+metric_shear); } /* a la CLASS */ if (ppr->ur_fluid_approximation == ufa_CLASS) { dy[pv->index_pt_shear_ur] = -3./tau*y[pv->index_pt_shear_ur] +2./3.*(y[pv->index_pt_theta_ur]+metric_ufa_class); } } } } /** - ---> non-cold dark matter (ncdm): massive neutrinos, WDM, etc. 
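Depending on the ncdm fluid approximation (ncdmfa) flag, either a closed fluid system (continuity, Euler and an approximate shear equation) is evolved for each species, or the exact Boltzmann hierarchy is integrated on the momentum grid, one block of (l_max_ncdm+1) multipoles per momentum bin.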
*/ //TBC: curvature in all ncdm if (pba->has_ncdm == _TRUE_) { idx = pv->index_pt_psi0_ncdm1; /** - ----> first case: use a fluid approximation (ncdmfa) */ //TBC: curvature if(ppw->approx[ppw->index_ap_ncdmfa] == (int)ncdmfa_on) { /** - -----> loop over species */ for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) { /** - -----> define intermediate quantitites */ rho_ncdm_bg = pvecback[pba->index_bg_rho_ncdm1+n_ncdm]; /* background density */ p_ncdm_bg = pvecback[pba->index_bg_p_ncdm1+n_ncdm]; /* background pressure */ pseudo_p_ncdm = pvecback[pba->index_bg_pseudo_p_ncdm1+n_ncdm]; /* pseudo-pressure (see CLASS IV paper) */ w_ncdm = p_ncdm_bg/rho_ncdm_bg; /* equation of state parameter */ ca2_ncdm = w_ncdm/3.0/(1.0+w_ncdm)*(5.0-pseudo_p_ncdm/p_ncdm_bg); /* adiabatic sound speed */ /* c_eff is (delta p / delta rho) in the gauge under consideration (not in the gauge comoving with the fluid) */ /* c_vis is introduced in order to close the system */ /* different ansatz for sound speed c_eff and viscosity speed c_vis */ if (ppr->ncdm_fluid_approximation == ncdmfa_mb) { ceff2_ncdm = ca2_ncdm; cvis2_ncdm = 3.*w_ncdm*ca2_ncdm; } if (ppr->ncdm_fluid_approximation == ncdmfa_hu) { ceff2_ncdm = ca2_ncdm; cvis2_ncdm = w_ncdm; } if (ppr->ncdm_fluid_approximation == ncdmfa_CLASS) { ceff2_ncdm = ca2_ncdm; cvis2_ncdm = 3.*w_ncdm*ca2_ncdm; } /** - -----> exact continuity equation */ dy[idx] = -(1.0+w_ncdm)*(y[idx+1]+metric_continuity)- 3.0*a_prime_over_a*(ceff2_ncdm-w_ncdm)*y[idx]; /** - -----> exact euler equation */ dy[idx+1] = -a_prime_over_a*(1.0-3.0*ca2_ncdm)*y[idx+1]+ ceff2_ncdm/(1.0+w_ncdm)*k2*y[idx]-k2*y[idx+2] + metric_euler; /** - -----> different ansatz for approximate shear derivative */ if (ppr->ncdm_fluid_approximation == ncdmfa_mb) { dy[idx+2] = -3.0*(a_prime_over_a*(2./3.-ca2_ncdm-pseudo_p_ncdm/p_ncdm_bg/3.)+1./tau)*y[idx+2] +8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_shear); } if (ppr->ncdm_fluid_approximation == ncdmfa_hu) { dy[idx+2] = -3.0*a_prime_over_a*ca2_ncdm/w_ncdm*y[idx+2] +8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_shear); } if (ppr->ncdm_fluid_approximation == ncdmfa_CLASS) { dy[idx+2] = -3.0*(a_prime_over_a*(2./3.-ca2_ncdm-pseudo_p_ncdm/p_ncdm_bg/3.)+1./tau)*y[idx+2] +8.0/3.0*cvis2_ncdm/(1.0+w_ncdm)*s_l[2]*(y[idx+1]+metric_ufa_class); } /** - -----> jump to next species */ idx += pv->l_max_ncdm[n_ncdm]+1; } } /** - ----> second case: use exact equation (Boltzmann hierarchy on momentum grid) */ else { /** - -----> loop over species */ for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) { /** - -----> loop over momentum */ for (index_q=0; index_q < pv->q_size_ncdm[n_ncdm]; index_q++) { /** - -----> define intermediate quantities */ dlnf0_dlnq = pba->dlnf0_dlnq_ncdm[n_ncdm][index_q]; q = pba->q_ncdm[n_ncdm][index_q]; epsilon = sqrt(q*q+a2*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]); qk_div_epsilon = k*q/epsilon; /** - -----> ncdm density for given momentum bin */ dy[idx] = -qk_div_epsilon*y[idx+1]+metric_continuity*dlnf0_dlnq/3.; /** - -----> ncdm velocity for given momentum bin */ dy[idx+1] = qk_div_epsilon/3.0*(y[idx] - 2*s_l[2]*y[idx+2]) -epsilon*metric_euler/(3*q*k)*dlnf0_dlnq; /** - -----> ncdm shear for given momentum bin */ dy[idx+2] = qk_div_epsilon/5.0*(2*s_l[2]*y[idx+1]-3.*s_l[3]*y[idx+3]) -s_l[2]*metric_shear*2./15.*dlnf0_dlnq; /** - -----> ncdm l>3 for given momentum bin */ for(l=3; l<pv->l_max_ncdm[n_ncdm]; l++){ dy[idx+l] = qk_div_epsilon/(2.*l+1.0)*(l*s_l[l]*y[idx+(l-1)]-(l+1.)*s_l[l+1]*y[idx+(l+1)]); } /** - -----> ncdm lmax for given momentum bin (truncation 
as in Ma and Bertschinger) but with curvature taken into account a la arXiv:1305.3261 */ dy[idx+l] = qk_div_epsilon*y[idx+l-1]-(1.+l)*k*cotKgen*y[idx+l]; /** - -----> jump to next momentum bin or species */ idx += (pv->l_max_ncdm[n_ncdm]+1); } } } } /** - ---> metric */ /** - ---> eta of synchronous gauge */ if (ppt->gauge == synchronous) { dy[pv->index_pt_eta] = pvecmetric[ppw->index_mt_eta_prime]; } if (ppt->gauge == newtonian) { dy[pv->index_pt_phi] = pvecmetric[ppw->index_mt_phi_prime]; } } /** - vector mode */ if (_vectors_) { fprintf(stderr,"we are in vectors\n"); ssqrt3 = sqrt(1.-2.*pba->K/k2); cb2 = pvecthermo[pth->index_th_cb2]; /** - --> baryon velocity */ if (ppt->gauge == synchronous) { dy[pv->index_pt_theta_b] = -(1-3.*cb2)*a_prime_over_a*y[pv->index_pt_theta_b] - pvecthermo[pth->index_th_dkappa]*(_SQRT2_/4.*delta_g + y[pv->index_pt_theta_b]); } else if (ppt->gauge == newtonian) { dy[pv->index_pt_theta_b] = -(1-3.*cb2)*a_prime_over_a*y[pv->index_pt_theta_b] - _SQRT2_/4.*pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b]) + pvecmetric[ppw->index_mt_V_prime]+(1.-3.*cb2)*a_prime_over_a*y[pv->index_pt_V]; } /* if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) { */ /* short-cut notations for the tensor perturbations */ delta_g = y[pv->index_pt_delta_g]; theta_g = y[pv->index_pt_theta_g]; shear_g = y[pv->index_pt_shear_g]; /* (P^{(1)}) (see Eq. B.23 in 1305.3261)*/ P1 = -_SQRT6_/40.*( 4./(3.*k)*theta_g //F1 +y[pv->index_pt_delta_g+3] +2.*y[pv->index_pt_pol0_g] +10./7.*y[pv->index_pt_pol2_g] -4./7.*y[pv->index_pt_pol0_g+4]); if (ppt->gauge == synchronous) { /* photon density (delta_g = F_0) */ dy[pv->index_pt_delta_g] = -4./3.*theta_g -pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b]); /* photon velocity (theta_g = (3k/4)*F_1) */ dy[pv->index_pt_theta_g] = k2*(delta_g/4.-s_l[2]*shear_g) -pvecthermo[pth->index_th_dkappa]*(theta_g+4.0/_SQRT6_*P1) +4.0/(3.0*_SQRT2_)*ssqrt3*y[pv->index_pt_hv_prime]; } else if (ppt->gauge == newtonian) { /* photon density (delta_g = F_0) */ dy[pv->index_pt_delta_g] = -4./3.*theta_g -pvecthermo[pth->index_th_dkappa]*(delta_g+2.*_SQRT2_*y[pv->index_pt_theta_b]) -2.*_SQRT2_*pvecmetric[ppw->index_mt_V_prime]; /* photon velocity (theta_g = (3k/4)*F_1) */ dy[pv->index_pt_theta_g] = k2*(delta_g/4.-s_l[2]*shear_g) -pvecthermo[pth->index_th_dkappa]*(theta_g+4.0/_SQRT6_*P1); } /* photon shear (shear_g = F_2/2) */ dy[pv->index_pt_shear_g] = 4./15.*s_l[2]*theta_g-3./10.*k*s_l[3]*y[pv->index_pt_shear_g+1] -pvecthermo[pth->index_th_dkappa]*shear_g; /* photon l=3 */ dy[pv->index_pt_l3_g] = k/7.*(6.*s_l[3]*shear_g-4.*s_l[4]*y[pv->index_pt_l3_g+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g]; /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */ for (l=4; l < pv->l_max_g; l++) dy[pv->index_pt_delta_g+l] = k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_delta_g+l-1] -(l+1.)*s_l[l+1]*y[pv->index_pt_delta_g+l+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l]; /* l=lmax */ l = pv->l_max_g; dy[pv->index_pt_delta_g+l] = k*(s_l[l]*y[pv->index_pt_delta_g+l-1] -(1.+l)*cotKgen*y[pv->index_pt_delta_g+l]) - pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l]; /* photon polarization, l=0 (pol0_g = G_0)*/ dy[pv->index_pt_pol0_g] = -k*y[pv->index_pt_pol0_g+1] -pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-_SQRT6_*P1); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */ for (l=1; l < pv->l_max_pol_g; l++) 
dy[pv->index_pt_pol0_g+l] = k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_pol0_g+l-1] -(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l]; /* l=lmax */ l = pv->l_max_pol_g; dy[pv->index_pt_pol0_g+l] = k*(s_l[l]*y[pv->index_pt_pol0_g+l-1] -(l+1.)*cotKgen*y[pv->index_pt_pol0_g+l]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l]; /* } } */ if (ppt->gauge == synchronous) { /* Vector metric perturbation in synchronous gauge: */ dy[pv->index_pt_hv_prime] = pvecmetric[ppw->index_mt_hv_prime_prime]; } else if (ppt->gauge == newtonian){ /* Vector metric perturbation in Newtonian gauge: */ dy[pv->index_pt_V] = pvecmetric[ppw->index_mt_V_prime]; } } /** - tensor modes: */ if (_tensors_) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { if (ppw->approx[ppw->index_ap_tca]==(int)tca_off) { /* short-cut notations for the tensor perturbations */ delta_g = y[pv->index_pt_delta_g]; theta_g = y[pv->index_pt_theta_g]; shear_g = y[pv->index_pt_shear_g]; /* (P^{(2)}) */ P2 =-1.0/_SQRT6_*( 1./10.*delta_g +2./7.*shear_g +3./70.*y[pv->index_pt_delta_g+4] -3./5.*y[pv->index_pt_pol0_g] +6./7.*y[pv->index_pt_pol2_g] -3./70.*y[pv->index_pt_pol0_g+4]); /* above expression from paper, expression below matches old class but is not correct P2 = -1.0/_SQRT6_*( 1./10.*delta_g +2./35.*shear_g +1./210.*y[pv->index_pt_delta_g+4] -3./5.*y[pv->index_pt_pol0_g] +6./35.*y[pv->index_pt_pol2_g] -1./210.*y[pv->index_pt_pol0_g+4] ); */ /* photon density (delta_g = F_0) */ dy[pv->index_pt_delta_g] = -4./3.*theta_g -pvecthermo[pth->index_th_dkappa]*(delta_g+_SQRT6_*P2) //+y[pv->index_pt_gwdot]; +_SQRT6_*y[pv->index_pt_gwdot]; //TBC /* photon velocity (theta_g = (3k/4)*F_1) */ dy[pv->index_pt_theta_g] = k2*(delta_g/4.-s_l[2]*shear_g) -pvecthermo[pth->index_th_dkappa]*theta_g; /* photon shear (shear_g = F_2/2) */ dy[pv->index_pt_shear_g] = 4./15.*s_l[2]*theta_g-3./10.*k*s_l[3]*y[pv->index_pt_shear_g+1] -pvecthermo[pth->index_th_dkappa]*shear_g; /* photon l=3 */ dy[pv->index_pt_l3_g] = k/7.*(6.*s_l[3]*shear_g-4.*s_l[4]*y[pv->index_pt_l3_g+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_l3_g]; /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */ for (l=4; l < pv->l_max_g; l++) dy[pv->index_pt_delta_g+l] = k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_delta_g+l-1] -(l+1.)*s_l[l+1]*y[pv->index_pt_delta_g+l+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l]; /* l=lmax */ l = pv->l_max_g; dy[pv->index_pt_delta_g+l] = k*(s_l[l]*y[pv->index_pt_delta_g+l-1] -(1.+l)*cotKgen*y[pv->index_pt_delta_g+l]) - pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_delta_g+l]; /* photon polarization, l=0 (pol0_g = G_0)*/ dy[pv->index_pt_pol0_g] = -k*y[pv->index_pt_pol0_g+1] -pvecthermo[pth->index_th_dkappa]*(y[pv->index_pt_pol0_g]-_SQRT6_*P2); /* additional momenta in Boltzmann hierarchy (beyond l=0,1,2,3,4) */ for (l=1; l < pv->l_max_pol_g; l++) dy[pv->index_pt_pol0_g+l] = k/(2.*l+1.)*(l*s_l[l]*y[pv->index_pt_pol0_g+l-1] -(l+1.)*s_l[l+1]*y[pv->index_pt_pol0_g+l+1]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l]; /* l=lmax */ l = pv->l_max_pol_g; dy[pv->index_pt_pol0_g+l] = k*(s_l[l]*y[pv->index_pt_pol0_g+l-1] -(l+1.)*cotKgen*y[pv->index_pt_pol0_g+l]) -pvecthermo[pth->index_th_dkappa]*y[pv->index_pt_pol0_g+l]; } } if (ppt->evolve_tensor_ur == _TRUE_) { dy[pv->index_pt_delta_ur] = -4./3.*y[pv->index_pt_theta_ur]+_SQRT6_*y[pv->index_pt_gwdot]; dy[pv->index_pt_theta_ur] = k2*(y[pv->index_pt_delta_ur]/4.-s2_squared*y[pv->index_pt_shear_ur]); dy[pv->index_pt_shear_ur] = 
(4./15.*y[pv->index_pt_theta_ur] -3./10.*k*s_l[3]/s_l[2]*y[pv->index_pt_shear_ur+1]); l = 3; dy[pv->index_pt_l3_ur] = k/(2.*l+1.)* (l*2.*s_l[l]*s_l[2]*y[pv->index_pt_shear_ur]-(l+1.)*s_l[l+1]*y[pv->index_pt_l3_ur+1]); for (l = 4; l < pv->l_max_ur; l++) { dy[pv->index_pt_delta_ur+l] = k/(2.*l+1)* (l*s_l[l]*y[pv->index_pt_delta_ur+l-1]-(l+1.)*s_l[l+1]*y[pv->index_pt_delta_ur+l+1]); } l = pv->l_max_ur; dy[pv->index_pt_delta_ur+l] = k*(s_l[l]*y[pv->index_pt_delta_ur+l-1]-(1.+l)*cotKgen*y[pv->index_pt_delta_ur+l]); } /** - --> non-cold dark matter (ncdm): massive neutrinos, WDM, etc. */ //TBC: curvature in all ncdm if (ppt->evolve_tensor_ncdm == _TRUE_) { idx = pv->index_pt_psi0_ncdm1; /** - ---> loop over species */ for (n_ncdm=0; n_ncdm<pv->N_ncdm; n_ncdm++) { /** - ----> loop over momentum */ for (index_q=0; index_q < pv->q_size_ncdm[n_ncdm]; index_q++) { /** - ----> define intermediate quantities */ dlnf0_dlnq = pba->dlnf0_dlnq_ncdm[n_ncdm][index_q]; q = pba->q_ncdm[n_ncdm][index_q]; epsilon = sqrt(q*q+a2*pba->M_ncdm[n_ncdm]*pba->M_ncdm[n_ncdm]); qk_div_epsilon = k*q/epsilon; /** - ----> ncdm density for given momentum bin */ dy[idx] = -qk_div_epsilon*y[idx+1]-0.25*_SQRT6_*y[pv->index_pt_gwdot]*dlnf0_dlnq; /** - ----> ncdm l>0 for given momentum bin */ for(l=1; l<pv->l_max_ncdm[n_ncdm]; l++){ dy[idx+l] = qk_div_epsilon/(2.*l+1.0)*(l*s_l[l]*y[idx+(l-1)]-(l+1.)*s_l[l+1]*y[idx+(l+1)]); } /** - ----> ncdm lmax for given momentum bin (truncation as in Ma and Bertschinger) but with curvature taken into account a la arXiv:1305.3261 */ dy[idx+l] = qk_div_epsilon*y[idx+l-1]-(1.+l)*k*cotKgen*y[idx+l]; /** - ----> jump to next momentum bin or species */ idx += (pv->l_max_ncdm[n_ncdm]+1); } } } /** - --> tensor metric perturbation h (gravitational waves) */ dy[pv->index_pt_gw] = y[pv->index_pt_gwdot]; /** - --> its time-derivative */ dy[pv->index_pt_gwdot] = pvecmetric[ppw->index_mt_gw_prime_prime]; } return _SUCCESS_; } int perturb_tca_slip_and_shear(double * y, void * parameters_and_workspace, ErrorMsg error_message ) { /** Summary: */ /** - define local variables */ /* scale factor and other background quantities */ double a,a_prime_over_a,a_primeprime_over_a,R; /* useful terms for tight-coupling approximation */ double slip=0.; double tau_c=0.,dtau_c=0.; double theta_prime,shear_g_prime=0.,theta_prime_prime; double g0,g0_prime,g0_prime_prime; double F=0.,F_prime=0.,F_prime_prime=0.; /* short-cut names for the fields of the input structure */ struct perturb_parameters_and_workspace * pppaw; double k,k2; struct precision * ppr; struct background * pba; struct thermo * pth; struct perturbs * ppt; struct perturb_workspace * ppw; double * pvecback; double * pvecthermo; double * pvecmetric; struct perturb_vector * pv; /* short-cut notations for the perturbations */ double delta_g=0.,theta_g=0.,shear_g=0.; double delta_b,theta_b; double Delta; double cb2; double metric_continuity=0.,metric_euler=0.,metric_shear=0.,metric_shear_prime=0.; /* perturbed recombination */ double delta_temp=0.; /* for use with curvature */ double s2_squared; /** - rename the fields of the input structure (just to avoid heavy notations) */ pppaw = parameters_and_workspace; k = pppaw->k; k2=k*k; ppr = pppaw->ppr; pba = pppaw->pba; pth = pppaw->pth; ppt = pppaw->ppt; ppw = pppaw->ppw; pvecback = ppw->pvecback; pvecthermo = ppw->pvecthermo; pvecmetric = ppw->pvecmetric; pv = ppw->pv; /** - compute related background quantities */ a = pvecback[pba->index_bg_a]; a_prime_over_a = pvecback[pba->index_bg_H] * a; a_primeprime_over_a = 
pvecback[pba->index_bg_H_prime] * a + 2. * a_prime_over_a * a_prime_over_a; //z = pba->a_today-1.; R = 4./3. * pvecback[pba->index_bg_rho_g]/pvecback[pba->index_bg_rho_b]; s2_squared = 1.-3.*pba->K/k2; /** - --> (a) define short-cut notations for the scalar perturbations */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_off) { delta_g = y[pv->index_pt_delta_g]; theta_g = y[pv->index_pt_theta_g]; } delta_b = y[pv->index_pt_delta_b]; theta_b = y[pv->index_pt_theta_b]; cb2 = pvecthermo[pth->index_th_cb2]; /* perturbed recombination */ if ((ppt->has_perturbed_recombination == _TRUE_) && (ppw->approx[ppw->index_ap_tca] == (int)tca_off) ){ delta_temp = y[pv->index_pt_perturbed_recombination_delta_temp]; } /** - --> (b) define short-cut notations used only in tight-coupling approximation */ tau_c = 1./pvecthermo[pth->index_th_dkappa]; /* inverse of opacity */ dtau_c = -pvecthermo[pth->index_th_ddkappa]*tau_c*tau_c; /* its first derivative wrt conformal time */ F = tau_c/(1+R); /* F = tau_c/(1+R) */ if (ppr->tight_coupling_approximation >= (int)second_order_CLASS) { F_prime = dtau_c/(1+R)+tau_c*a_prime_over_a*R/(1+R)/(1+R); /*F' needed by second_order_CLASS and compromise_CLASS */ if (ppr->tight_coupling_approximation == (int)second_order_CLASS) { F_prime_prime =(- pvecthermo[pth->index_th_dddkappa]*tau_c*tau_c /* F'' needed by second_order_CLASS only */ + 2.*pvecthermo[pth->index_th_ddkappa]*pvecthermo[pth->index_th_ddkappa]*tau_c*tau_c*tau_c)/(1+R) +2.*dtau_c*a_prime_over_a*R/(1+R)/(1+R) +tau_c*((a_primeprime_over_a-2.*a_prime_over_a*a_prime_over_a)+2.*a_prime_over_a*a_prime_over_a*R/(1+R))*R/(1+R)/(1+R); } } /** - --> (c) compute metric-related quantities (depending on gauge; additional gauges can be coded below) - Each continuity equation contains a term in (theta+metric_continuity) with metric_continuity = (h_prime/2) in synchronous gauge, (-3 phi_prime) in newtonian gauge - Each Euler equation contains a source term metric_euler with metric_euler = 0 in synchronous gauge, (k2 psi) in newtonian gauge - Each shear derivative equation contains a source term metric_shear equal to metric_shear = (h_prime+6eta_prime)/2 in synchronous gauge, 0 in newtonian gauge - metric_shear_prime is the derivative of metric_shear - In the ufa_class approximation, the leading-order source term is (h_prime/2) in synchronous gauge, (-3 (phi_prime+psi_prime)) in newtonian gauge: we approximate the later by (-6 phi_prime) */ if (ppt->gauge == synchronous) { metric_continuity = pvecmetric[ppw->index_mt_h_prime]/2.; metric_euler = 0.; metric_shear = k2 * pvecmetric[ppw->index_mt_alpha]; metric_shear_prime = k2 * pvecmetric[ppw->index_mt_alpha_prime]; } if (ppt->gauge == newtonian) { metric_continuity = -3.*pvecmetric[ppw->index_mt_phi_prime]; metric_euler = k2*pvecmetric[ppw->index_mt_psi]; metric_shear = 0.; metric_shear_prime = 0.; } /** - --> (d) if some approximation schemes are turned on, enforce a few y[ ] values computed in perturb_einstein */ /* free-streaming photon velocity */ if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) theta_g = ppw->rsa_theta_g; /** - ---> like Ma & Bertschinger */ if (ppr->tight_coupling_approximation == (int)first_order_MB) { slip=2.*R/(1.+R)*a_prime_over_a*(theta_b-theta_g) +F*(-a_primeprime_over_a*theta_b +k2*(-a_prime_over_a*delta_g/2. +cb2*(-theta_b-metric_continuity) -4./3.*(-theta_g-metric_continuity)/4.) 
-a_prime_over_a*metric_euler); } /** - ---> relax assumption dkappa~a\f$^{-2}\f$ (like in CAMB) */ if ((ppr->tight_coupling_approximation == (int)first_order_CAMB) || (ppr->tight_coupling_approximation == (int)compromise_CLASS)) { slip=(dtau_c/tau_c-2.*a_prime_over_a/(1.+R))*(theta_b-theta_g) +F*(-a_primeprime_over_a*theta_b +k2*(-a_prime_over_a*delta_g/2. +cb2*(-theta_b-metric_continuity) -4./3.*(-theta_g-metric_continuity)/4.) -a_prime_over_a*metric_euler); } /** - ---> also relax assumption cb2~a\f$^{-1}\f$ */ if ((ppr->tight_coupling_approximation == (int)first_order_CLASS) || (ppr->tight_coupling_approximation == (int)second_order_CLASS)){ slip=(dtau_c/tau_c-2.*a_prime_over_a/(1.+R))*(theta_b-theta_g) +F*(-a_primeprime_over_a*theta_b +k2*(-a_prime_over_a*delta_g/2. +pvecthermo[pth->index_th_dcb2]*delta_b +cb2*(-theta_b-metric_continuity) -4./3.*(-theta_g-metric_continuity)/4.) -a_prime_over_a*metric_euler); } /** - ---> intermediate quantities for 2nd order tca: shear_g at first order in tight-coupling */ shear_g=16./45.*tau_c*(theta_g+metric_shear); /* (Ma & Bertschinger give (1/9)*(4/3) instead of (2/15)*(4/3) because they didn't include the contribution of G_gamma0 and G_gamma2, which are of the same order as sigma_g. This was already consistently included in CAMB) */ /** - ---> intermediate quantities for 2nd order tca: zero order for theta_b' = theta_g' */ /** - ----> perturbed recombination has an impact **/ theta_prime = (-a_prime_over_a*theta_b+k2*(cb2*(delta_b+delta_temp)+R/4.*delta_g))/(1.+R) + metric_euler; /** - ---> intermediate quantities for 2nd order tca: shear_g_prime at first order in tight-coupling */ shear_g_prime=16./45.*(tau_c*(theta_prime+metric_shear_prime)+dtau_c*(theta_g+metric_shear)); /** - ---> 2nd order as in CRS*/ if (ppr->tight_coupling_approximation == (int)second_order_CRS) { if (ppt->gauge == newtonian) { class_stop(error_message, "the second_order_CRS approach to tight-coupling is coded in synchronous gauge, not newtonian: change gauge or try another tight-coupling scheme"); } if (ppt->gauge == synchronous) { class_test(pba->sgnK != 0, ppt->error_message, "the second_order_CRS approach to tight-coupling is coded in the flat case only: for non-flat try another tight-coupling scheme"); /* infer Delta from h'' using Einstein equation */ Delta = 2*k2*y[pv->index_pt_eta] -2*a_prime_over_a*pvecmetric[ppw->index_mt_h_prime] -pvecmetric[ppw->index_mt_h_prime_prime]; /* monster expression for slip at second-order in tight-coupling */ slip=(-2./(1.+R)*a_prime_over_a-pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa])*(theta_b-theta_g) +(-a_primeprime_over_a*theta_b -k2*a_prime_over_a*(delta_g/2.-2.*shear_g) +k2*(cb2*(-theta_b-metric_continuity) -4./3.*(-theta_g-metric_continuity)/4. 
+shear_g_prime) )/pvecthermo[pth->index_th_dkappa]/(1.+R) -2.*R*(3.*a_prime_over_a*a_prime_over_a*cb2+(1.+R)*(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)-3.*a_prime_over_a*a_prime_over_a) /(1.+R)/(1.+R)/(1.+R)*(theta_b-theta_g)/pvecthermo[pth->index_th_dkappa] +( a_primeprime_over_a*a_prime_over_a*((2.-3.*cb2)*R-2.)*theta_b/(1.+R) +a_prime_over_a*k2*(1.-3.*cb2)*theta_b/3./(1.+R) /* perturbed recombination has an impact (next two lines) */ +a_primeprime_over_a*k2*cb2*(delta_b+delta_temp)/(1.+R) +k2*k2*(3.*cb2-1.)*cb2*(delta_b+delta_temp)/3./(1.+R) +k2*k2*R*(3.*cb2-1.)*delta_g/12./(1.+R) +a_primeprime_over_a*k2*(2.+3.*R)*delta_g/4./(1.+R) +a_prime_over_a*a_prime_over_a*k2*((2.-3.*cb2)*R-1.)*delta_g/2./(1.+R) +a_prime_over_a*k2*cb2*(1.+(3.*cb2-2.)*R)*(-theta_b-metric_continuity)/(1.+R) +a_prime_over_a*k2*(2.+(5.-3.*cb2)*R)*4./3.*(-theta_g-metric_continuity)/4./(1.+R) +a_prime_over_a*(1.-3.*cb2)*k2*2.*metric_shear/3. +k2*k2*(3.*cb2-1.)*y[pv->index_pt_eta]/3. +2.*a_prime_over_a*k2*(3.*cb2-1.)*pvecmetric[ppw->index_mt_eta_prime] +k2*(1.-3.*cb2)*Delta/6. )/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]/(1.+R)/(1.+R) -(4.*a_primeprime_over_a*theta_b-4.*k2*cb2*(-theta_b-metric_continuity)+2.*a_prime_over_a*k2*delta_g+k2*4./3.*(-theta_g-metric_continuity))/2./(1.+R)/(1.+R)*pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa] +4.*a_prime_over_a*R/(1.+R)/(1.+R)*pvecthermo[pth->index_th_ddkappa]/pvecthermo[pth->index_th_dkappa]/pvecthermo[pth->index_th_dkappa]*(theta_b-theta_g); /* second-order correction to shear */ shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+k2*pvecmetric[ppw->index_mt_alpha_prime]); } } /** - ---> 2nd order like in CLASS paper */ if (ppr->tight_coupling_approximation == (int)second_order_CLASS) { if (ppt->gauge == newtonian) { class_stop(error_message, "the second_order_CLASS approach to tight-coupling is coded in synchronous gauge, not newtonian: change gauge or try another tight-coupling scheme"); } if (ppt->gauge == synchronous) { /* zero order for theta_b'' = theta_g'' */ theta_prime_prime = ((R-1.)*a_prime_over_a*theta_prime-(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_b +k2*(pvecthermo[pth->index_th_dcb2]*delta_b+cb2*(-theta_b-metric_continuity)-a_prime_over_a*R/4.*delta_g+R/4.*4./3.*(-theta_g-metric_continuity)))/(1.+R); /* zero-order quantities g0, g0', go'' */ g0 = -a_prime_over_a*theta_b + k2*(cb2*delta_b-delta_g/4.); g0_prime = -a_prime_over_a*theta_prime-(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_b+k2*(pvecthermo[pth->index_th_dcb2]*delta_b+(1./3.-cb2)*(theta_b+0.5*pvecmetric[ppw->index_mt_h_prime])); g0_prime_prime = -a_prime_over_a*theta_prime_prime-2.*(a_primeprime_over_a-a_prime_over_a*a_prime_over_a)*theta_prime -(2.*a_prime_over_a*a_prime_over_a*a_prime_over_a-3.*a_primeprime_over_a*a_prime_over_a)*theta_b +k2*(pvecthermo[pth->index_th_ddcb2]*delta_b-2.*pvecthermo[pth->index_th_dcb2]*(theta_b+0.5*pvecmetric[ppw->index_mt_h_prime])+(1./3.-cb2)*(theta_prime+0.5*pvecmetric[ppw->index_mt_h_prime_prime])); /* slip at second order */ slip = (1.-2*a_prime_over_a*F)*slip + F*k2*s2_squared*(2.*a_prime_over_a*shear_g+shear_g_prime) -F*(F_prime_prime*g0+2.*F_prime*g0_prime+F*g0_prime_prime); /* second-order correction to shear */ shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+metric_shear_prime); } } /** - ---> add only the most important 2nd order terms */ if 
(ppr->tight_coupling_approximation == (int)compromise_CLASS) { /* slip at second order (only leading second-order terms) */ slip = (1.-2.*a_prime_over_a*F)*slip + F*k2*(2.*a_prime_over_a*s2_squared*shear_g+s2_squared*shear_g_prime-(1./3.-cb2)*(F*theta_prime+2.*F_prime*theta_b)); /* second-order correction to shear */ shear_g = (1.-11./6.*dtau_c)*shear_g-11./6.*tau_c*16./45.*tau_c*(theta_prime+metric_shear_prime); } /** - ---> store tight-coupling values of photon shear and its derivative */ ppw->tca_shear_g = shear_g; ppw->tca_slip = slip; return _SUCCESS_; } int perturb_rsa_delta_and_theta( struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt, double k, double * y, double a_prime_over_a, double * pvecthermo, struct perturb_workspace * ppw ) { /* - define local variables */ double k2; k2 = k*k; // formulas below TBC for curvaturema /* newtonian gauge */ if (ppt->gauge == newtonian) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) { if (ppr->radiation_streaming_approximation == rsa_null) { ppw->rsa_delta_g = 0.; ppw->rsa_theta_g = 0.; } else { ppw->rsa_delta_g = -4.*y[ppw->pv->index_pt_phi]; ppw->rsa_theta_g = 6.*ppw->pvecmetric[ppw->index_mt_phi_prime]; } if (ppr->radiation_streaming_approximation == rsa_MD_with_reio) { ppw->rsa_delta_g += -4./k2*ppw->pvecthermo[pth->index_th_dkappa]*y[ppw->pv->index_pt_theta_b]; ppw->rsa_theta_g += 3./k2*(ppw->pvecthermo[pth->index_th_ddkappa]*y[ppw->pv->index_pt_theta_b] +ppw->pvecthermo[pth->index_th_dkappa]* (-a_prime_over_a*y[ppw->pv->index_pt_theta_b] +ppw->pvecthermo[pth->index_th_cb2]*k2*y[ppw->pv->index_pt_delta_b] +k2*y[ppw->pv->index_pt_phi])); } if (pba->has_ur == _TRUE_) { if (ppr->radiation_streaming_approximation == rsa_null) { ppw->rsa_delta_ur = 0.; ppw->rsa_theta_ur = 0.; } else { ppw->rsa_delta_ur = -4.*y[ppw->pv->index_pt_phi]; ppw->rsa_theta_ur = 6.*ppw->pvecmetric[ppw->index_mt_phi_prime]; } } } } /* synchronous gauge */ if (ppt->gauge == synchronous) { if (ppw->approx[ppw->index_ap_rsa] == (int)rsa_on) { if (ppr->radiation_streaming_approximation == rsa_null) { ppw->rsa_delta_g = 0.; ppw->rsa_theta_g = 0.; } else { ppw->rsa_delta_g = 4./k2*(a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime] -k2*y[ppw->pv->index_pt_eta]); ppw->rsa_theta_g = -0.5*ppw->pvecmetric[ppw->index_mt_h_prime]; } if (ppr->radiation_streaming_approximation == rsa_MD_with_reio) { ppw->rsa_delta_g += -4./k2*ppw->pvecthermo[pth->index_th_dkappa]*(y[ppw->pv->index_pt_theta_b]+0.5*ppw->pvecmetric[ppw->index_mt_h_prime]); ppw->rsa_theta_g += 3./k2*(ppw->pvecthermo[pth->index_th_ddkappa]* (y[ppw->pv->index_pt_theta_b] +0.5*ppw->pvecmetric[ppw->index_mt_h_prime]) +ppw->pvecthermo[pth->index_th_dkappa]* (-a_prime_over_a*y[ppw->pv->index_pt_theta_b] + ppw->pvecthermo[pth->index_th_cb2]*k2*y[ppw->pv->index_pt_delta_b] -a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime] +k2*y[ppw->pv->index_pt_eta])); } if (pba->has_ur == _TRUE_) { if (ppr->radiation_streaming_approximation == rsa_null) { ppw->rsa_delta_ur = 0.; ppw->rsa_theta_ur = 0.; } else { ppw->rsa_delta_ur = 4./k2*(a_prime_over_a*ppw->pvecmetric[ppw->index_mt_h_prime] -k2*y[ppw->pv->index_pt_eta]); ppw->rsa_theta_ur = -0.5*ppw->pvecmetric[ppw->index_mt_h_prime]; } } } } return _SUCCESS_; }
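/* Illustrative sketch only (not part of CLASS): the three background fluid quantities used by the
 * fld equations in perturb_derivs() for a CPL equation of state w(a) = w0 + wa*(1 - a/a_today).
 * The free-standing helper and its name are hypothetical; in perturb_derivs() the same quantities
 * are computed inline from pba->w0_fld, pba->wa_fld and a_prime_over_a. */
static double example_fld_adiabatic_sound_speed(double w0_fld, double wa_fld,
                                                double a, double a_today,
                                                double a_prime_over_a) {
  double w = w0_fld + wa_fld * (1. - a / a_today);            /* equation of state w(a) */
  double w_prime = - wa_fld * a / a_today * a_prime_over_a;   /* dw/dtau */
  return w - w_prime / 3. / (1. + w) / a_prime_over_a;        /* adiabatic sound speed ca2 */
}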
kmeans.c
#include "learn/PQ/vlfeat/vl/kmeans.h" #include "learn/PQ/vlfeat/vl/generic.h" #include "learn/PQ/vlfeat/vl/mathop.h" #include <string.h> #ifdef _OPENMP #include <omp.h> #endif /* ================================================================ */ #ifndef VL_KMEANS_INSTANTIATING /** ------------------------------------------------------------------ ** @brief Reset state ** ** The function reset the state of the KMeans object. It deletes ** any stored centers, releasing the corresponding memory. This ** cancels the effect of seeding or setting the centers, but ** does not change the other configuration parameters. **/ VL_EXPORT void vl_kmeans_reset (VlKMeans * self) { self->numCenters = 0 ; self->dimension = 0 ; if (self->centers) vl_free(self->centers) ; if (self->centerDistances) vl_free(self->centerDistances) ; self->centers = NULL ; self->centerDistances = NULL ; } /** ------------------------------------------------------------------ ** @brief Create a new KMeans object ** @param dataType type of data (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE) ** @param distance distance. ** @return new KMeans object instance. **/ VL_EXPORT VlKMeans * vl_kmeans_new (vl_type dataType, VlVectorComparisonType distance) { VlKMeans * self = vl_calloc(1, sizeof(VlKMeans)) ; self->algorithm = VlKMeansLloyd ; self->distance = distance ; self->dataType = dataType ; self->verbosity = 0 ; self->maxNumIterations = 100 ; self->minEnergyVariation = 1e-4 ; self->numRepetitions = 1 ; self->centers = NULL ; self->centerDistances = NULL ; self->numTrees = 3; self->maxNumComparisons = 100; vl_kmeans_reset (self) ; return self ; } /** ------------------------------------------------------------------ ** @brief Create a new KMeans object by copy ** @param kmeans KMeans object to copy. ** @return new copy. **/ VL_EXPORT VlKMeans * vl_kmeans_new_copy (VlKMeans const * kmeans) { VlKMeans * self = vl_malloc(sizeof(VlKMeans)) ; self->algorithm = kmeans->algorithm ; self->distance = kmeans->distance ; self->dataType = kmeans->dataType ; self->verbosity = kmeans->verbosity ; self->maxNumIterations = kmeans->maxNumIterations ; self->numRepetitions = kmeans->numRepetitions ; self->dimension = kmeans->dimension ; self->numCenters = kmeans->numCenters ; self->centers = NULL ; self->centerDistances = NULL ; self->numTrees = kmeans->numTrees; self->maxNumComparisons = kmeans->maxNumComparisons; if (kmeans->centers) { vl_size dataSize = vl_get_type_size(self->dataType) * self->dimension * self->numCenters ; self->centers = vl_malloc(dataSize) ; memcpy (self->centers, kmeans->centers, dataSize) ; } if (kmeans->centerDistances) { vl_size dataSize = vl_get_type_size(self->dataType) * self->numCenters * self->numCenters ; self->centerDistances = vl_malloc(dataSize) ; memcpy (self->centerDistances, kmeans->centerDistances, dataSize) ; } return self ; } /** ------------------------------------------------------------------ ** @brief Deletes a KMeans object ** @param self KMeans object instance. ** ** The function deletes the KMeans object instance created ** by ::vl_kmeans_new. 
**/ VL_EXPORT void vl_kmeans_delete (VlKMeans * self) { vl_kmeans_reset (self) ; vl_free (self) ; } /* an helper structure */ typedef struct _VlKMeansSortWrapper { vl_uint32 * permutation ; void const * data ; vl_size stride ; } VlKMeansSortWrapper ; /* ---------------------------------------------------------------- */ /* Instantiate shuffle algorithm */ #define VL_SHUFFLE_type vl_uindex #define VL_SHUFFLE_prefix _vl_kmeans #include "shuffle-def.h" /* #ifdef VL_KMEANS_INSTANTITATING */ #endif /* ================================================================ */ #ifdef VL_KMEANS_INSTANTIATING /* ---------------------------------------------------------------- */ /* Set centers */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_kmeans_set_centers_, SFX) (VlKMeans * self, TYPE const * centers, vl_size dimension, vl_size numCenters) { self->dimension = dimension ; self->numCenters = numCenters ; self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ; memcpy ((TYPE*)self->centers, centers, sizeof(TYPE) * dimension * numCenters) ; } /* ---------------------------------------------------------------- */ /* Random seeding */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_kmeans_init_centers_with_rand_data_, SFX) (VlKMeans * self, TYPE const * data, vl_size dimension, vl_size numData, vl_size numCenters) { vl_uindex i, j, k ; VlRand * rand = vl_get_rand () ; self->dimension = dimension ; self->numCenters = numCenters ; self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ; { vl_uindex * perm = vl_malloc (sizeof(vl_uindex) * numData) ; #if (FLT == VL_TYPE_FLOAT) VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ; #else VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ; #endif TYPE * distances = vl_malloc (sizeof(TYPE) * numCenters) ; /* get a random permutation of the data point */ for (i = 0 ; i < numData ; ++i) perm[i] = i ; _vl_kmeans_shuffle (perm, numData, rand) ; for (k = 0, i = 0 ; k < numCenters ; ++ i) { /* compare the next data point to all centers collected so far to detect duplicates (if there are enough left) */ if (numCenters - k < numData - i) { vl_bool duplicateDetected = VL_FALSE ; VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(distances, dimension, data + dimension * perm[i], 1, (TYPE*)self->centers, k, distFn) ; for (j = 0 ; j < k ; ++j) { duplicateDetected |= (distances[j] == 0) ; } if (duplicateDetected) continue ; } /* ok, it is not a duplicate so we can accept it! 
*/ memcpy ((TYPE*)self->centers + dimension * k, data + dimension * perm[i], sizeof(TYPE) * dimension) ; k ++ ; } vl_free(distances) ; vl_free(perm) ; } } /* ---------------------------------------------------------------- */ /* kmeans++ seeding */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_kmeans_init_centers_plus_plus_, SFX) (VlKMeans * self, TYPE const * data, vl_size dimension, vl_size numData, vl_size numCenters) { vl_uindex x, c ; VlRand * rand = vl_get_rand () ; TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ; TYPE * minDistances = vl_malloc (sizeof(TYPE) * numData) ; #if (FLT == VL_TYPE_FLOAT) VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ; #else VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ; #endif self->dimension = dimension ; self->numCenters = numCenters ; self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ; for (x = 0 ; x < numData ; ++x) { minDistances[x] = (TYPE) VL_INFINITY_D ; } /* select the first point at random */ x = vl_rand_uindex (rand, numData) ; c = 0 ; while (1) { TYPE energy = 0 ; TYPE acc = 0 ; TYPE thresh = (TYPE) vl_rand_real1 (rand) ; memcpy ((TYPE*)self->centers + c * dimension, data + x * dimension, sizeof(TYPE) * dimension) ; c ++ ; if (c == numCenters) break ; VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX) (distances, dimension, (TYPE*)self->centers + (c - 1) * dimension, 1, data, numData, distFn) ; for (x = 0 ; x < numData ; ++x) { minDistances[x] = VL_MIN(minDistances[x], distances[x]) ; energy += minDistances[x] ; } for (x = 0 ; x < numData - 1 ; ++x) { acc += minDistances[x] ; if (acc >= thresh * energy) break ; } } vl_free(distances) ; vl_free(minDistances) ; } /* ---------------------------------------------------------------- */ /* Quantization */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_kmeans_quantize_, SFX) (VlKMeans * self, vl_uint32 * assignments, TYPE * distances, TYPE const * data, vl_size numData) { vl_index i ; #if (FLT == VL_TYPE_FLOAT) VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ; #else VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ; #endif #ifdef _OPENMP #pragma omp parallel default(none) \ shared(self, distances, assignments, numData, distFn, data) \ num_threads(vl_get_max_threads()) #endif { /* vl_malloc cannot be used here if mapped to MATLAB malloc */ TYPE * distanceToCenters = malloc(sizeof(TYPE) * self->numCenters) ; #ifdef _OPENMP #pragma omp for #endif for (i = 0 ; i < (signed)numData ; ++i) { vl_uindex k ; TYPE bestDistance = (TYPE) VL_INFINITY_D ; VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(distanceToCenters, self->dimension, data + self->dimension * i, 1, (TYPE*)self->centers, self->numCenters, distFn) ; for (k = 0 ; k < self->numCenters ; ++k) { if (distanceToCenters[k] < bestDistance) { bestDistance = distanceToCenters[k] ; assignments[i] = (vl_uint32)k ; } } if (distances) distances[i] = bestDistance ; } free(distanceToCenters) ; } } /* ---------------------------------------------------------------- */ /* ANN quantization */ /* ---------------------------------------------------------------- */ static void VL_XCAT(_vl_kmeans_quantize_ann_, SFX) (VlKMeans * self, vl_uint32 * assignments, TYPE * distances, TYPE const * data, vl_size numData, vl_bool update) { #if (FLT == VL_TYPE_FLOAT) 
VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ; #else VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ; #endif VlKDForest * forest = vl_kdforest_new(self->dataType,self->dimension,self->numTrees, self->distance) ; vl_kdforest_set_max_num_comparisons(forest,self->maxNumComparisons); vl_kdforest_set_thresholding_method(forest,VL_KDTREE_MEDIAN); vl_kdforest_build(forest,self->numCenters,self->centers); #ifdef _OPENMP #pragma omp parallel default(none) \ num_threads(vl_get_max_threads()) \ shared(self, forest, update, assignments, distances, data, numData, distFn) #endif { VlKDForestNeighbor neighbor ; VlKDForestSearcher * searcher ; vl_index x; #ifdef _OPENMP #pragma omp critical #endif searcher = vl_kdforest_new_searcher (forest) ; #ifdef _OPENMP #pragma omp for #endif for(x = 0 ; x < (signed)numData ; ++x) { vl_kdforestsearcher_query (searcher, &neighbor, 1, (TYPE const *) (data + x*self->dimension)); if (distances) { if(!update) { distances[x] = (TYPE) neighbor.distance; assignments[x] = (vl_uint32) neighbor.index ; } else { TYPE prevDist = (TYPE) distFn(self->dimension, data + self->dimension * x, (TYPE*)self->centers + self->dimension *assignments[x]); if (prevDist > (TYPE) neighbor.distance) { distances[x] = (TYPE) neighbor.distance ; assignments[x] = (vl_uint32) neighbor.index ; } else { distances[x] = prevDist ; } } } else { assignments[x] = (vl_uint32) neighbor.index ; } } /* end for */ } /* end of parallel region */ vl_kdforest_delete(forest); } /* ---------------------------------------------------------------- */ /* Helper functions */ /* ---------------------------------------------------------------- */ /* The sorting routine is used to find increasing permutation of each * data dimension. This is used to quickly find the median for l1 * distance clustering. 
*/ VL_INLINE TYPE VL_XCAT3(_vl_kmeans_, SFX, _qsort_cmp) (VlKMeansSortWrapper * array, vl_uindex indexA, vl_uindex indexB) { return ((TYPE*)array->data) [array->permutation[indexA] * array->stride] - ((TYPE*)array->data) [array->permutation[indexB] * array->stride] ; } VL_INLINE void VL_XCAT3(_vl_kmeans_, SFX, _qsort_swap) (VlKMeansSortWrapper * array, vl_uindex indexA, vl_uindex indexB) { vl_uint32 tmp = array->permutation[indexA] ; array->permutation[indexA] = array->permutation[indexB] ; array->permutation[indexB] = tmp ; } #define VL_QSORT_prefix VL_XCAT3(_vl_kmeans_, SFX, _qsort) #define VL_QSORT_array VlKMeansSortWrapper* #define VL_QSORT_cmp VL_XCAT3(_vl_kmeans_, SFX, _qsort_cmp) #define VL_QSORT_swap VL_XCAT3(_vl_kmeans_, SFX, _qsort_swap) #include "qsort-def.h" static void VL_XCAT(_vl_kmeans_sort_data_helper_, SFX) (VlKMeans * self, vl_uint32 * permutations, TYPE const * data, vl_size numData) { vl_uindex d, x ; for (d = 0 ; d < self->dimension ; ++d) { VlKMeansSortWrapper array ; array.permutation = permutations + d * numData ; array.data = data + d ; array.stride = self->dimension ; for (x = 0 ; x < numData ; ++x) { array.permutation[x] = (vl_uint32)x ; } VL_XCAT3(_vl_kmeans_, SFX, _qsort_sort)(&array, numData) ; } } /* ---------------------------------------------------------------- */ /* Lloyd refinement */ /* ---------------------------------------------------------------- */ static double VL_XCAT(_vl_kmeans_refine_centers_lloyd_, SFX) (VlKMeans * self, TYPE const * data, vl_size numData) { vl_size c, d, x, iteration ; double previousEnergy = VL_INFINITY_D ; double initialEnergy = VL_INFINITY_D ; double energy ; TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ; vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ; vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ; vl_uint32 * permutations = NULL ; vl_size * numSeenSoFar = NULL ; VlRand * rand = vl_get_rand () ; vl_size totNumRestartedCenters = 0 ; vl_size numRestartedCenters = 0 ; if (self->distance == VlDistanceL1) { permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ; numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ; VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ; } for (energy = VL_INFINITY_D, iteration = 0; 1 ; ++ iteration) { /* assign data to cluters */ VL_XCAT(_vl_kmeans_quantize_, SFX)(self, assignments, distances, data, numData) ; /* compute energy */ energy = 0 ; for (x = 0 ; x < numData ; ++x) energy += distances[x] ; if (self->verbosity) { VL_PRINTF("kmeans: Lloyd iter %d: energy = %g\n", iteration, energy) ; } /* check termination conditions */ if (iteration >= self->maxNumIterations) { if (self->verbosity) { VL_PRINTF("kmeans: Lloyd terminating because maximum number of iterations reached\n") ; } break ; } if (energy == previousEnergy) { if (self->verbosity) { VL_PRINTF("kmeans: Lloyd terminating because the algorithm fully converged\n") ; } break ; } if (iteration == 0) { initialEnergy = energy ; } else { double eps = (previousEnergy - energy) / (initialEnergy - energy) ; if (eps < self->minEnergyVariation) { if (self->verbosity) { VL_PRINTF("kmeans: ANN terminating because the energy relative variation was less than %f\n", self->minEnergyVariation) ; } break ; } } /* begin next iteration */ previousEnergy = energy ; /* update clusters */ memset(clusterMasses, 0, sizeof(vl_size) * numData) ; for (x = 0 ; x < numData ; ++x) { clusterMasses[assignments[x]] ++ ; } numRestartedCenters = 0 ; switch (self->distance) { 
case VlDistanceL2: memset(self->centers, 0, sizeof(TYPE) * self->dimension * self->numCenters) ; for (x = 0 ; x < numData ; ++x) { TYPE * cpt = (TYPE*)self->centers + assignments[x] * self->dimension ; TYPE const * xpt = data + x * self->dimension ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] += xpt[d] ; } } for (c = 0 ; c < self->numCenters ; ++c) { TYPE * cpt = (TYPE*)self->centers + c * self->dimension ; if (clusterMasses[c] > 0) { TYPE mass = clusterMasses[c] ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] /= mass ; } } else { vl_uindex x = vl_rand_uindex(rand, numData) ; numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } break ; case VlDistanceL1: for (d = 0 ; d < self->dimension ; ++d) { vl_uint32 * perm = permutations + d * numData ; memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ; for (x = 0; x < numData ; ++x) { c = assignments[perm[x]] ; if (2 * numSeenSoFar[c] < clusterMasses[c]) { ((TYPE*)self->centers) [d + c * self->dimension] = data [d + perm[x] * self->dimension] ; } numSeenSoFar[c] ++ ; } /* restart the centers as required */ for (c = 0 ; c < self->numCenters ; ++c) { if (clusterMasses[c] == 0) { TYPE * cpt = (TYPE*)self->centers + c * self->dimension ; vl_uindex x = vl_rand_uindex(rand, numData) ; numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } } break ; default: abort(); } /* done compute centers */ totNumRestartedCenters += numRestartedCenters ; if (self->verbosity && numRestartedCenters) { VL_PRINTF("kmeans: Lloyd iter %d: restarted %d centers\n", iteration, numRestartedCenters) ; } } /* next Lloyd iteration */ if (permutations) { vl_free(permutations) ; } if (numSeenSoFar) { vl_free(numSeenSoFar) ; } vl_free(distances) ; vl_free(assignments) ; vl_free(clusterMasses) ; return energy ; } static double VL_XCAT(_vl_kmeans_update_center_distances_, SFX) (VlKMeans * self) { #if (FLT == VL_TYPE_FLOAT) VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ; #else VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ; #endif if (! 
self->centerDistances) { self->centerDistances = vl_malloc (sizeof(TYPE) * self->numCenters * self->numCenters) ; } VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(self->centerDistances, self->dimension, self->centers, self->numCenters, NULL, 0, distFn) ; return self->numCenters * (self->numCenters - 1) / 2 ; } static double VL_XCAT(_vl_kmeans_refine_centers_ann_, SFX) (VlKMeans * self, TYPE const * data, vl_size numData) { vl_size c, d, x, iteration ; double initialEnergy = VL_INFINITY_D ; double previousEnergy = VL_INFINITY_D ; double energy ; vl_uint32 * permutations = NULL ; vl_size * numSeenSoFar = NULL ; VlRand * rand = vl_get_rand () ; vl_size totNumRestartedCenters = 0 ; vl_size numRestartedCenters = 0 ; vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ; vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ; TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ; if (self->distance == VlDistanceL1) { permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ; numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ; VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ; } for (energy = VL_INFINITY_D, iteration = 0; 1 ; ++ iteration) { /* assign data to cluters */ VL_XCAT(_vl_kmeans_quantize_ann_, SFX)(self, assignments, distances, data, numData, iteration > 0) ; /* compute energy */ energy = 0 ; for (x = 0 ; x < numData ; ++x) energy += distances[x] ; if (self->verbosity) { VL_PRINTF("kmeans: ANN iter %d: energy = %g\n", iteration, energy) ; } /* check termination conditions */ if (iteration >= self->maxNumIterations) { if (self->verbosity) { VL_PRINTF("kmeans: ANN terminating because the maximum number of iterations has been reached\n") ; } break ; } if (energy == previousEnergy) { if (self->verbosity) { VL_PRINTF("kmeans: ANN terminating because the algorithm fully converged\n") ; } break ; } if (iteration == 0) { initialEnergy = energy ; } else { double eps = (previousEnergy - energy) / (initialEnergy - energy) ; if (eps < self->minEnergyVariation) { if (self->verbosity) { VL_PRINTF("kmeans: ANN terminating because the energy relative variation was less than %f\n", self->minEnergyVariation) ; } break ; } } /* begin next iteration */ previousEnergy = energy ; /* update clusters */ memset(clusterMasses, 0, sizeof(vl_size) * numData) ; for (x = 0 ; x < numData ; ++x) { clusterMasses[assignments[x]] ++ ; } numRestartedCenters = 0 ; switch (self->distance) { case VlDistanceL2: memset(self->centers, 0, sizeof(TYPE) * self->dimension * self->numCenters) ; for (x = 0 ; x < numData ; ++x) { TYPE * cpt = (TYPE*)self->centers + assignments[x] * self->dimension ; TYPE const * xpt = data + x * self->dimension ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] += xpt[d] ; } } for (c = 0 ; c < self->numCenters ; ++c) { TYPE * cpt = (TYPE*)self->centers + c * self->dimension ; if (clusterMasses[c] > 0) { TYPE mass = clusterMasses[c] ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] /= mass ; } } else { vl_uindex x = vl_rand_uindex(rand, numData) ; numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } break ; case VlDistanceL1: for (d = 0 ; d < self->dimension ; ++d) { vl_uint32 * perm = permutations + d * numData ; memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ; for (x = 0; x < numData ; ++x) { c = assignments[perm[x]] ; if (2 * numSeenSoFar[c] < clusterMasses[c]) { ((TYPE*)self->centers) [d + c * self->dimension] = data [d + perm[x] * 
self->dimension] ; } numSeenSoFar[c] ++ ; } /* restart the centers as required */ for (c = 0 ; c < self->numCenters ; ++c) { if (clusterMasses[c] == 0) { TYPE * cpt = (TYPE*)self->centers + c * self->dimension ; vl_uindex x = vl_rand_uindex(rand, numData) ; numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } } break ; default: VL_PRINT("bad distance set: %d\n",self->distance); abort(); } /* done compute centers */ totNumRestartedCenters += numRestartedCenters ; if (self->verbosity && numRestartedCenters) { VL_PRINTF("kmeans: ANN iter %d: restarted %d centers\n", iteration, numRestartedCenters) ; } } if (permutations) { vl_free(permutations) ; } if (numSeenSoFar) { vl_free(numSeenSoFar) ; } vl_free(distances) ; vl_free(assignments) ; vl_free(clusterMasses) ; return energy ; } /* ---------------------------------------------------------------- */ /* Elkan refinement */ /* ---------------------------------------------------------------- */ static double VL_XCAT(_vl_kmeans_refine_centers_elkan_, SFX) (VlKMeans * self, TYPE const * data, vl_size numData) { vl_size d, iteration ; vl_index x ; vl_uint32 c, j ; vl_bool allDone ; TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ; vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ; vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ; VlRand * rand = vl_get_rand () ; #if (FLT == VL_TYPE_FLOAT) VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ; #else VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ; #endif TYPE * nextCenterDistances = vl_malloc (sizeof(TYPE) * self->numCenters) ; TYPE * pointToClosestCenterUB = vl_malloc (sizeof(TYPE) * numData) ; vl_bool * pointToClosestCenterUBIsStrict = vl_malloc (sizeof(vl_bool) * numData) ; TYPE * pointToCenterLB = vl_malloc (sizeof(TYPE) * numData * self->numCenters) ; TYPE * newCenters = vl_malloc(sizeof(TYPE) * self->dimension * self->numCenters) ; TYPE * centerToNewCenterDistances = vl_malloc (sizeof(TYPE) * self->numCenters) ; vl_uint32 * permutations = NULL ; vl_size * numSeenSoFar = NULL ; double energy ; vl_size totDistanceComputationsToInit = 0 ; vl_size totDistanceComputationsToRefreshUB = 0 ; vl_size totDistanceComputationsToRefreshLB = 0 ; vl_size totDistanceComputationsToRefreshCenterDistances = 0 ; vl_size totDistanceComputationsToNewCenters = 0 ; vl_size totDistanceComputationsToFinalize = 0 ; vl_size totNumRestartedCenters = 0 ; if (self->distance == VlDistanceL1) { permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ; numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ; VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ; } /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Initialization */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* An iteration is: get_new_centers + reassign + get_energy. 
This counts as iteration 0, where get_new_centers is assumed to be performed before calling the train function by the initialization function */ /* update distances between centers */ totDistanceComputationsToInit += VL_XCAT(_vl_kmeans_update_center_distances_, SFX)(self) ; /* assigmen points to the initial centers and initialize bounds */ memset(pointToCenterLB, 0, sizeof(TYPE) * self->numCenters * numData) ; for (x = 0 ; x < (signed)numData ; ++x) { TYPE distance ; /* do the first center */ assignments[x] = 0 ; distance = distFn(self->dimension, data + x * self->dimension, (TYPE*)self->centers + 0) ; pointToClosestCenterUB[x] = distance ; pointToClosestCenterUBIsStrict[x] = VL_TRUE ; pointToCenterLB[0 + x * self->numCenters] = distance ; totDistanceComputationsToInit += 1 ; /* do other centers */ for (c = 1 ; c < self->numCenters ; ++c) { /* Can skip if the center assigned so far is twice as close as its distance to the center under consideration */ if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) * pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances) [c + assignments[x] * self->numCenters]) { continue ; } distance = distFn(self->dimension, data + x * self->dimension, (TYPE*)self->centers + c * self->dimension) ; pointToCenterLB[c + x * self->numCenters] = distance ; totDistanceComputationsToInit += 1 ; if (distance < pointToClosestCenterUB[x]) { pointToClosestCenterUB[x] = distance ; assignments[x] = c ; } } } /* compute UB on energy */ energy = 0 ; for (x = 0 ; x < (signed)numData ; ++x) { energy += pointToClosestCenterUB[x] ; } if (self->verbosity) { VL_PRINTF("kmeans: Elkan iter 0: energy = %g, dist. calc. = %d\n", energy, totDistanceComputationsToInit) ; } /* #define SANITY*/ #ifdef SANITY { int xx ; int cc ; TYPE tol = 1e-5 ; VL_PRINTF("inconsistencies after initial assignments:\n"); for (xx = 0 ; xx < numData ; ++xx) { for (cc = 0 ; cc < self->numCenters ; ++cc) { TYPE a = pointToCenterLB[cc + xx * self->numCenters] ; TYPE b = distFn(self->dimension, data + self->dimension * xx, (TYPE*)self->centers + self->dimension * cc) ; if (cc == assignments[xx]) { TYPE z = pointToClosestCenterUB[xx] ; if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n", cc, xx, z, b) ; } if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f\n", cc, xx, a, b) ; } } } #endif /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Iterations */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ for (iteration = 1 ; 1; ++iteration) { vl_size numDistanceComputationsToRefreshUB = 0 ; vl_size numDistanceComputationsToRefreshLB = 0 ; vl_size numDistanceComputationsToRefreshCenterDistances = 0 ; vl_size numDistanceComputationsToNewCenters = 0 ; vl_size numRestartedCenters = 0 ; /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Compute new centers */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ memset(clusterMasses, 0, sizeof(vl_size) * numData) ; for (x = 0 ; x < (signed)numData ; ++x) { clusterMasses[assignments[x]] ++ ; } switch (self->distance) { case VlDistanceL2: memset(newCenters, 0, sizeof(TYPE) * self->dimension * self->numCenters) ; for (x = 0 ; x < (signed)numData ; ++x) { TYPE * cpt = newCenters + assignments[x] * self->dimension ; TYPE const * xpt = data + x * self->dimension ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] += xpt[d] ; } } for (c = 0 ; c < self->numCenters ; ++c) { TYPE * cpt = newCenters + c * self->dimension ; if (clusterMasses[c] > 0) { TYPE mass = clusterMasses[c] ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] 
/= mass ; } } else { /* restart the center */ vl_uindex x = vl_rand_uindex(rand, numData) ; numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } break ; case VlDistanceL1: for (d = 0 ; d < self->dimension ; ++d) { vl_uint32 * perm = permutations + d * numData ; memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ; for (x = 0; x < (signed)numData ; ++x) { c = assignments[perm[x]] ; if (2 * numSeenSoFar[c] < clusterMasses[c]) { newCenters [d + c * self->dimension] = data [d + perm[x] * self->dimension] ; } numSeenSoFar[c] ++ ; } } /* restart the centers as required */ for (c = 0 ; c < self->numCenters ; ++c) { if (clusterMasses[c] == 0) { TYPE * cpt = newCenters + c * self->dimension ; vl_uindex x = vl_rand_uindex(rand, numData) ; numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } break ; default: abort(); } /* done compute centers */ /* compute the distance from the old centers to the new centers */ for (c = 0 ; c < self->numCenters ; ++c) { TYPE distance = distFn(self->dimension, newCenters + c * self->dimension, (TYPE*)self->centers + c * self->dimension) ; centerToNewCenterDistances[c] = distance ; numDistanceComputationsToNewCenters += 1 ; } /* make the new centers current */ { TYPE * tmp = self->centers ; self->centers = newCenters ; newCenters = tmp ; } /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Reassign points to a centers */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Update distances between centers. */ numDistanceComputationsToRefreshCenterDistances += VL_XCAT(_vl_kmeans_update_center_distances_, SFX)(self) ; for (c = 0 ; c < self->numCenters ; ++c) { nextCenterDistances[c] = (TYPE) VL_INFINITY_D ; for (j = 0 ; j < self->numCenters ; ++j) { if (j == c) continue ; nextCenterDistances[c] = VL_MIN(nextCenterDistances[c], ((TYPE*)self->centerDistances) [j + c * self->numCenters]) ; } } /* Update upper bounds on point-to-closest-center distances based on the center variation. */ for (x = 0 ; x < (signed)numData ; ++x) { TYPE a = pointToClosestCenterUB[x] ; TYPE b = centerToNewCenterDistances[assignments[x]] ; if (self->distance == VlDistanceL1) { pointToClosestCenterUB[x] = a + b ; } else { #if (FLT == VL_TYPE_FLOAT) TYPE sqrtab = sqrtf (a * b) ; #else TYPE sqrtab = sqrt (a * b) ; #endif pointToClosestCenterUB[x] = a + b + 2.0 * sqrtab ; } pointToClosestCenterUBIsStrict[x] = VL_FALSE ; } /* Update lower bounds on point-to-center distances based on the center variation. 
*/ #if defined(_OPENMP) #pragma omp parallel for default(shared) private(x,c) num_threads(vl_get_max_threads()) #endif for (x = 0 ; x < (signed)numData ; ++x) { for (c = 0 ; c < self->numCenters ; ++c) { TYPE a = pointToCenterLB[c + x * self->numCenters] ; TYPE b = centerToNewCenterDistances[c] ; if (a < b) { pointToCenterLB[c + x * self->numCenters] = 0 ; } else { if (self->distance == VlDistanceL1) { pointToCenterLB[c + x * self->numCenters] = a - b ; } else { #if (FLT == VL_TYPE_FLOAT) TYPE sqrtab = sqrtf (a * b) ; #else TYPE sqrtab = sqrt (a * b) ; #endif pointToCenterLB[c + x * self->numCenters] = a + b - 2.0 * sqrtab ; } } } } #ifdef SANITY { int xx ; int cc ; TYPE tol = 1e-5 ; VL_PRINTF("inconsistencies before assignments:\n"); for (xx = 0 ; xx < numData ; ++xx) { for (cc = 0 ; cc < self->numCenters ; ++cc) { TYPE a = pointToCenterLB[cc + xx * self->numCenters] ; TYPE b = distFn(self->dimension, data + self->dimension * xx, (TYPE*)self->centers + self->dimension * cc) ; if (cc == assignments[xx]) { TYPE z = pointToClosestCenterUB[xx] ; if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n", cc, xx, z, b) ; } if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f (assign = %d)\n", cc, xx, a, b, assignments[xx]) ; } } } #endif /* Scan the data and do the reassignments. Use the bounds to skip as many point-to-center distance calculations as possible. */ allDone = VL_TRUE ; #if defined(_OPENMP) #pragma omp parallel for \ default(none) \ shared(self,numData, \ pointToClosestCenterUB,pointToCenterLB, \ nextCenterDistances,pointToClosestCenterUBIsStrict, \ assignments,data,distFn,allDone) \ private(c,x) \ reduction(+:numDistanceComputationsToRefreshUB,numDistanceComputationsToRefreshLB) \ num_threads(vl_get_max_threads()) #endif for (x = 0 ; x < (signed)numData ; ++ x) { /* A point x sticks with its current center assignmets[x] the UB to d(x, c[assigmnets[x]]) is not larger than half the distance of c[assigments[x]] to any other center c. */ if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) * pointToClosestCenterUB[x] <= nextCenterDistances[assignments[x]]) { continue ; } for (c = 0 ; c < self->numCenters ; ++c) { vl_uint32 cx = assignments[x] ; TYPE distance ; /* The point is not reassigned to a given center c if either: 0 - c is already the assigned center 1 - The UB of d(x, c[assignments[x]]) is smaller than half the distance of c[assigments[x]] to c, OR 2 - The UB of d(x, c[assignmets[x]]) is smaller than the LB of the distance of x to c. */ if (cx == c) { continue ; } if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) * pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances) [c + cx * self->numCenters]) { continue ; } if (pointToClosestCenterUB[x] <= pointToCenterLB [c + x * self->numCenters]) { continue ; } /* If the UB is loose, try recomputing it and test again */ if (! pointToClosestCenterUBIsStrict[x]) { distance = distFn(self->dimension, data + self->dimension * x, (TYPE*)self->centers + self->dimension * cx) ; pointToClosestCenterUB[x] = distance ; pointToClosestCenterUBIsStrict[x] = VL_TRUE ; pointToCenterLB[cx + x * self->numCenters] = distance ; numDistanceComputationsToRefreshUB += 1 ; if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) * pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances) [c + cx * self->numCenters]) { continue ; } if (pointToClosestCenterUB[x] <= pointToCenterLB [c + x * self->numCenters]) { continue ; } } /* Now the UB is strict (equal to d(x, assignments[x])), but we still could not exclude that x should be reassigned to c. 
We therefore compute the distance, update the LB, and check if a reassigmnet must be made */ distance = distFn(self->dimension, data + x * self->dimension, (TYPE*)self->centers + c * self->dimension) ; numDistanceComputationsToRefreshLB += 1 ; pointToCenterLB[c + x * self->numCenters] = distance ; if (distance < pointToClosestCenterUB[x]) { assignments[x] = c ; pointToClosestCenterUB[x] = distance ; allDone = VL_FALSE ; /* the UB strict flag is already set here */ } } /* assign center */ } /* next data point */ totDistanceComputationsToRefreshUB += numDistanceComputationsToRefreshUB ; totDistanceComputationsToRefreshLB += numDistanceComputationsToRefreshLB ; totDistanceComputationsToRefreshCenterDistances += numDistanceComputationsToRefreshCenterDistances ; totDistanceComputationsToNewCenters += numDistanceComputationsToNewCenters ; totNumRestartedCenters += numRestartedCenters ; #ifdef SANITY { int xx ; int cc ; TYPE tol = 1e-5 ; VL_PRINTF("inconsistencies after assignments:\n"); for (xx = 0 ; xx < numData ; ++xx) { for (cc = 0 ; cc < self->numCenters ; ++cc) { TYPE a = pointToCenterLB[cc + xx * self->numCenters] ; TYPE b = distFn(self->dimension, data + self->dimension * xx, (TYPE*)self->centers + self->dimension * cc) ; if (cc == assignments[xx]) { TYPE z = pointToClosestCenterUB[xx] ; if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n", cc, xx, z, b) ; } if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f (assign = %d)\n", cc, xx, a, b, assignments[xx]) ; } } } #endif /* compute UB on energy */ energy = 0 ; for (x = 0 ; x < (signed)numData ; ++x) { energy += pointToClosestCenterUB[x] ; } if (self->verbosity) { vl_size numDistanceComputations = numDistanceComputationsToRefreshUB + numDistanceComputationsToRefreshLB + numDistanceComputationsToRefreshCenterDistances + numDistanceComputationsToNewCenters ; VL_PRINTF("kmeans: Elkan iter %d: energy <= %g, dist. calc. = %d\n", iteration, energy, numDistanceComputations) ; if (numRestartedCenters) { VL_PRINTF("kmeans: Elkan iter %d: restarted %d centers\n", iteration, energy, numRestartedCenters) ; } if (self->verbosity > 1) { VL_PRINTF("kmeans: Elkan iter %d: total dist. calc. 
per type: " "UB: %.1f%% (%d), LB: %.1f%% (%d), " "intra_center: %.1f%% (%d), " "new_center: %.1f%% (%d)\n", iteration, 100.0 * numDistanceComputationsToRefreshUB / numDistanceComputations, numDistanceComputationsToRefreshUB, 100.0 *numDistanceComputationsToRefreshLB / numDistanceComputations, numDistanceComputationsToRefreshLB, 100.0 * numDistanceComputationsToRefreshCenterDistances / numDistanceComputations, numDistanceComputationsToRefreshCenterDistances, 100.0 * numDistanceComputationsToNewCenters / numDistanceComputations, numDistanceComputationsToNewCenters) ; } } /* check termination conditions */ if (iteration >= self->maxNumIterations) { if (self->verbosity) { VL_PRINTF("kmeans: Elkan terminating because maximum number of iterations reached\n") ; } break ; } if (allDone) { if (self->verbosity) { VL_PRINTF("kmeans: Elkan terminating because the algorithm fully converged\n") ; } break ; } } /* next Elkan iteration */ /* compute true energy */ energy = 0 ; for (x = 0 ; x < (signed)numData ; ++ x) { vl_uindex cx = assignments [x] ; energy += distFn(self->dimension, data + self->dimension * x, (TYPE*)self->centers + self->dimension * cx) ; totDistanceComputationsToFinalize += 1 ; } { vl_size totDistanceComputations = totDistanceComputationsToInit + totDistanceComputationsToRefreshUB + totDistanceComputationsToRefreshLB + totDistanceComputationsToRefreshCenterDistances + totDistanceComputationsToNewCenters + totDistanceComputationsToFinalize ; double saving = (double)totDistanceComputations / (iteration * self->numCenters * numData) ; if (self->verbosity) { VL_PRINTF("kmeans: Elkan: total dist. calc.: %d (%.2f %% of Lloyd)\n", totDistanceComputations, saving * 100.0) ; if (totNumRestartedCenters) { VL_PRINTF("kmeans: Elkan: there have been %d restarts\n", totNumRestartedCenters) ; } } if (self->verbosity > 1) { VL_PRINTF("kmeans: Elkan: total dist. calc. 
per type: " "init: %.1f%% (%d), UB: %.1f%% (%d), LB: %.1f%% (%d), " "intra_center: %.1f%% (%d), " "new_center: %.1f%% (%d), " "finalize: %.1f%% (%d)\n", 100.0 * totDistanceComputationsToInit / totDistanceComputations, totDistanceComputationsToInit, 100.0 * totDistanceComputationsToRefreshUB / totDistanceComputations, totDistanceComputationsToRefreshUB, 100.0 *totDistanceComputationsToRefreshLB / totDistanceComputations, totDistanceComputationsToRefreshLB, 100.0 * totDistanceComputationsToRefreshCenterDistances / totDistanceComputations, totDistanceComputationsToRefreshCenterDistances, 100.0 * totDistanceComputationsToNewCenters / totDistanceComputations, totDistanceComputationsToNewCenters, 100.0 * totDistanceComputationsToFinalize / totDistanceComputations, totDistanceComputationsToFinalize) ; } } if (permutations) { vl_free(permutations) ; } if (numSeenSoFar) { vl_free(numSeenSoFar) ; } vl_free(distances) ; vl_free(assignments) ; vl_free(clusterMasses) ; vl_free(nextCenterDistances) ; vl_free(pointToClosestCenterUB) ; vl_free(pointToClosestCenterUBIsStrict) ; vl_free(pointToCenterLB) ; vl_free(newCenters) ; vl_free(centerToNewCenterDistances) ; return energy ; } /* ---------------------------------------------------------------- */ static double VL_XCAT(_vl_kmeans_refine_centers_, SFX) (VlKMeans * self, TYPE const * data, vl_size numData) { switch (self->algorithm) { case VlKMeansLloyd: return VL_XCAT(_vl_kmeans_refine_centers_lloyd_, SFX)(self, data, numData) ; break ; case VlKMeansElkan: return VL_XCAT(_vl_kmeans_refine_centers_elkan_, SFX)(self, data, numData) ; break ; case VlKMeansANN: return VL_XCAT(_vl_kmeans_refine_centers_ann_, SFX)(self, data, numData) ; break ; default: abort() ; } } /* VL_KMEANS_INSTANTIATING */ #else #ifndef __DOXYGEN__ #define FLT VL_TYPE_FLOAT #define TYPE float #define SFX f #define VL_KMEANS_INSTANTIATING #include "kmeans.c" #define FLT VL_TYPE_DOUBLE #define TYPE double #define SFX d #define VL_KMEANS_INSTANTIATING #include "kmeans.c" #endif /* VL_KMEANS_INSTANTIATING */ #endif /* ================================================================ */ #ifndef VL_KMEANS_INSTANTIATING /** ------------------------------------------------------------------ ** @brief Set centers ** @param self KMeans object. ** @param centers centers to copy. ** @param dimension data dimension. ** @param numCenters number of centers. **/ VL_EXPORT void vl_kmeans_set_centers (VlKMeans * self, void const * centers, vl_size dimension, vl_size numCenters) { vl_kmeans_reset (self) ; switch (self->dataType) { case VL_TYPE_FLOAT : _vl_kmeans_set_centers_f (self, (float const *)centers, dimension, numCenters) ; break ; case VL_TYPE_DOUBLE : _vl_kmeans_set_centers_d (self, (double const *)centers, dimension, numCenters) ; break ; default: abort() ; } } /** ------------------------------------------------------------------ ** @brief init centers by randomly sampling data ** @param self KMeans object. ** @param data data to sample from. ** @param dimension data dimension. ** @param numData nmber of data points. ** @param numCenters number of centers. ** ** The function inits the KMeans centers by randomly sampling ** the data @a data. 
**/ VL_EXPORT void vl_kmeans_init_centers_with_rand_data (VlKMeans * self, void const * data, vl_size dimension, vl_size numData, vl_size numCenters) { vl_kmeans_reset (self) ; switch (self->dataType) { case VL_TYPE_FLOAT : _vl_kmeans_init_centers_with_rand_data_f (self, (float const *)data, dimension, numData, numCenters) ; break ; case VL_TYPE_DOUBLE : _vl_kmeans_init_centers_with_rand_data_d (self, (double const *)data, dimension, numData, numCenters) ; break ; default: abort() ; } } /** ------------------------------------------------------------------ ** @brief Seed centers by the KMeans++ algorithm ** @param self KMeans object. ** @param data data to sample from. ** @param dimension data dimension. ** @param numData nmber of data points. ** @param numCenters number of centers. **/ VL_EXPORT void vl_kmeans_init_centers_plus_plus (VlKMeans * self, void const * data, vl_size dimension, vl_size numData, vl_size numCenters) { vl_kmeans_reset (self) ; switch (self->dataType) { case VL_TYPE_FLOAT : _vl_kmeans_init_centers_plus_plus_f (self, (float const *)data, dimension, numData, numCenters) ; break ; case VL_TYPE_DOUBLE : _vl_kmeans_init_centers_plus_plus_d (self, (double const *)data, dimension, numData, numCenters) ; break ; default: abort() ; } } /** ------------------------------------------------------------------ ** @brief Quantize data ** @param self KMeans object. ** @param assignments data to closest center assignments (output). ** @param distances data to closest center distance (output). ** @param data data to quantize. ** @param numData number of data points to quantize. **/ VL_EXPORT void vl_kmeans_quantize (VlKMeans * self, vl_uint32 * assignments, void * distances, void const * data, vl_size numData) { switch (self->dataType) { case VL_TYPE_FLOAT : _vl_kmeans_quantize_f (self, assignments, distances, (float const *)data, numData) ; break ; case VL_TYPE_DOUBLE : _vl_kmeans_quantize_d (self, assignments, distances, (double const *)data, numData) ; break ; default: abort() ; } } /** ------------------------------------------------------------------ ** @brief Quantize data using approximate nearest neighbours (ANN). ** @param self KMeans object. ** @param assignments data to centers assignments (output). ** @param distances data to closes center distance (output) ** @param data data to quantize. ** @param numData number of data points. ** @param update choose wether to update current assignments. ** ** The function uses an ANN procedure to compute the approximate ** nearest neighbours of the input data point. ** ** Setting @a update to ::VL_TRUE will cause the algorithm ** to *update existing assignments*. This means that each ** element of @a assignments and @a distances is updated ony if the ** ANN procedure can find a better assignment of the existing one. **/ VL_EXPORT void vl_kmeans_quantize_ann (VlKMeans * self, vl_uint32 * assignments, void * distances, void const * data, vl_size numData, vl_bool update) { switch (self->dataType) { case VL_TYPE_FLOAT : _vl_kmeans_quantize_ann_f (self, assignments, distances, (float const *)data, numData, update) ; break ; case VL_TYPE_DOUBLE : _vl_kmeans_quantize_ann_d (self, assignments, distances, (double const *)data, numData, update) ; break ; default: abort() ; } } /** ------------------------------------------------------------------ ** @brief Refine center locations. ** @param self KMeans object. ** @param data data to quantize. ** @param numData number of data points. ** @return K-means energy at the end of optimization. 
** ** The function calls the underlying K-means quantization algorithm ** (@ref VlKMeansAlgorithm) to quantize the specified data @a data. ** The function assumes that the cluster centers have already ** been assigned by using one of the seeding functions, or by ** setting them. **/ VL_EXPORT double vl_kmeans_refine_centers (VlKMeans * self, void const * data, vl_size numData) { assert (self->centers) ; switch (self->dataType) { case VL_TYPE_FLOAT : return _vl_kmeans_refine_centers_f (self, (float const *)data, numData) ; case VL_TYPE_DOUBLE : return _vl_kmeans_refine_centers_d (self, (double const *)data, numData) ; default: abort() ; } } /** ------------------------------------------------------------------ ** @brief Cluster data. ** @param self KMeans object. ** @param data data to quantize. ** @param dimension data dimension. ** @param numData number of data points. ** @param numCenters number of clusters. ** @return K-means energy at the end of optimization. ** ** The function initializes the centers by using the initialization ** algorithm set by ::vl_kmeans_set_initialization and refines them ** by the quantization algorithm set by ::vl_kmeans_set_algorithm. ** The process is repeated one or more times (see ** ::vl_kmeans_set_num_repetitions) and the resutl with smaller ** energy is retained. **/ VL_EXPORT double vl_kmeans_cluster (VlKMeans * self, void const * data, vl_size dimension, vl_size numData, vl_size numCenters) { vl_uindex repetition ; double bestEnergy = VL_INFINITY_D ; void * bestCenters = NULL ; for (repetition = 0 ; repetition < self->numRepetitions ; ++ repetition) { double energy ; double timeRef ; if (self->verbosity) { VL_PRINTF("kmeans: repetition %d of %d\n", repetition + 1, self->numRepetitions) ; } timeRef = vl_get_cpu_time() ; switch (self->initialization) { case VlKMeansRandomSelection : vl_kmeans_init_centers_with_rand_data (self, data, dimension, numData, numCenters) ; break ; case VlKMeansPlusPlus : vl_kmeans_init_centers_plus_plus (self, data, dimension, numData, numCenters) ; break ; default: abort() ; } if (self->verbosity) { VL_PRINTF("kmeans: K-means initialized in %.2f s\n", vl_get_cpu_time() - timeRef) ; } timeRef = vl_get_cpu_time () ; energy = vl_kmeans_refine_centers (self, data, numData) ; if (self->verbosity) { VL_PRINTF("kmeans: K-means terminated in %.2f s with energy %g\n", vl_get_cpu_time() - timeRef, energy) ; } /* copy centers to output if current solution is optimal */ /* check repetition == 0 as well in case energy = NaN, which */ /* can happen if the data contain NaNs */ if (energy < bestEnergy || repetition == 0) { void * temp ; bestEnergy = energy ; if (bestCenters == NULL) { bestCenters = vl_malloc(vl_get_type_size(self->dataType) * self->dimension * self->numCenters) ; } /* swap buffers */ temp = bestCenters ; bestCenters = self->centers ; self->centers = temp ; } /* better energy */ } /* next repetition */ vl_free (self->centers) ; self->centers = bestCenters ; return bestEnergy ; } /* VL_KMEANS_INSTANTIATING */ #endif #undef SFX #undef TYPE #undef FLT #undef VL_KMEANS_INSTANTIATING
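/* A minimal usage sketch of the public API defined above: create a VlKMeans
   object, cluster single-precision data with kmeans++ seeding and Lloyd
   refinement, then quantize points against the learned centers.  This is an
   illustrative example, not part of the original kmeans.c: the data pointer
   and the sizes are placeholders, and the setters used here are the ones
   declared in kmeans.h (vl_kmeans_set_algorithm, vl_kmeans_set_initialization,
   vl_kmeans_set_max_num_iterations, vl_kmeans_set_num_repetitions). */
#include "learn/PQ/vlfeat/vl/kmeans.h"

static void example_kmeans_usage (float const * data,
                                  vl_size dimension,
                                  vl_size numData,
                                  vl_size numCenters)
{
  vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ;
  float * distances = vl_malloc (sizeof(float) * numData) ;

  /* single-precision data, squared Euclidean distance */
  VlKMeans * kmeans = vl_kmeans_new (VL_TYPE_FLOAT, VlDistanceL2) ;
  vl_kmeans_set_algorithm (kmeans, VlKMeansLloyd) ;
  vl_kmeans_set_initialization (kmeans, VlKMeansPlusPlus) ;
  vl_kmeans_set_max_num_iterations (kmeans, 100) ;
  vl_kmeans_set_num_repetitions (kmeans, 3) ;

  /* seed the centers and refine them; the best energy over the
     repetitions is retained internally */
  vl_kmeans_cluster (kmeans, data, dimension, numData, numCenters) ;

  /* assign each point to its closest center */
  vl_kmeans_quantize (kmeans, assignments, distances, data, numData) ;

  vl_kmeans_delete (kmeans) ;
  vl_free (assignments) ;
  vl_free (distances) ;
}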
generator_spgemm_csc_bsparse.c
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/libxsmm/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) ******************************************************************************/ /** * @file * This file is part of GemmCodeGenerator. * * @author Alexander Heinecke (alexander.heinecke AT mytum.de, http://www5.in.tum.de/wiki/index.php/Alexander_Heinecke,_M.Sc.,_M.Sc._with_honors) * * @section LICENSE * Copyright (c) 2012-2014, Technische Universitaet Muenchen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* * @section DESCRIPTION * <DESCRIPTION> */ #include "generator_spgemm_csc_bsparse.h" #include "generator_common.h" #include "libxsmm_main.h" LIBXSMM_API_INTERN void libxsmm_generator_spgemm_csc_bsparse( libxsmm_generated_code* io_generated_code, const libxsmm_gemm_descriptor* i_xgemm_desc, const char* i_arch, const unsigned int* i_row_idx, const unsigned int* i_column_idx, const double* i_values ) { unsigned int l_n; unsigned int l_z; unsigned int l_column_elements; unsigned int l_flop_count = 0; char l_new_code[512]; int l_max_code_length = 511; int l_code_length = 0; LIBXSMM_UNUSED(i_values); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_m = 0;\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); /* reset C if beta is zero */ if (0 != (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=0 */ l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " unsigned int l_n = 0;\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_n = 0; l_n < %u; l_n++) {\n", (unsigned int)i_xgemm_desc->n); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); if ( i_xgemm_desc->m > 1 ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma vector aligned\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } if ( LIBXSMM_DATATYPE_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) { C[(l_n*%u)+l_m] = 0.0; }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc); } else { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) { C[(l_n*%u)+l_m] = 0.0f; }\n", (unsigned int)i_xgemm_desc->m, (unsigned int)i_xgemm_desc->ldc); } libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); /* determine the correct simd pragma for each architecture */ if ( ( strcmp( i_arch, "noarch" ) == 0 ) || ( strcmp( i_arch, "wsm" ) == 0 ) || ( strcmp( i_arch, "snb" ) == 0 ) || ( strcmp( i_arch, "hsw" ) == 0 ) ) { if ( i_xgemm_desc->m > 7 ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(8)\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } else if ( i_xgemm_desc->m > 3 ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(4)\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } else if ( i_xgemm_desc->m > 1 ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(2)\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } else {} if ( (i_xgemm_desc->m > 1) && ((LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0) && ((LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0) ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, 
l_max_code_length, " #pragma vector aligned\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } } else if ( ( strcmp( i_arch, "knl" ) == 0 ) || ( strcmp( i_arch, "skx" ) == 0 ) || ( strcmp( i_arch, "clx" ) == 0 ) || ( strcmp( i_arch, "cpx" ) == 0 ) ) { if ( (i_xgemm_desc->m > 1) && ((LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0) && ((LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0) ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " #pragma simd vectorlength(32)\n #pragma vector aligned\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); } } else { LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_ARCH ); return; } /* generate the actual kernel */ l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " for ( l_m = 0; l_m < %u; l_m++) {\n", (unsigned int)i_xgemm_desc->m); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); for ( l_n = 0; l_n < (unsigned int)i_xgemm_desc->n; l_n++ ) { l_column_elements = i_column_idx[l_n+1] - i_column_idx[l_n]; for ( l_z = 0; l_z < l_column_elements; l_z++ ) { /* check k such that we just use rows which actually need to be multiplied */ if ( i_row_idx[i_column_idx[l_n] + l_z] < (unsigned int)i_xgemm_desc->k ) { l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " C[%u+l_m] += A[%u+l_m] * B[%u];\n", l_n * i_xgemm_desc->ldc, i_row_idx[i_column_idx[l_n] + l_z]*i_xgemm_desc->lda, i_column_idx[l_n] + l_z); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); l_flop_count += 2; } } } l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, " }\n"); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); /* add flop counter */ l_code_length = LIBXSMM_SNPRINTF(l_new_code, l_max_code_length, "\n#ifndef NDEBUG\n#ifdef _OPENMP\n#pragma omp atomic\n#endif\nlibxsmm_num_total_flops += %u;\n#endif\n", l_flop_count * (unsigned int)i_xgemm_desc->m); libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length ); }
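/* For orientation, the C source emitted by the routine above follows the
   shape sketched below, written out by hand for a hypothetical 2x2 case with
   beta=0, m=n=lda=ldc=2 and one non-zero of B per column (rows 0 and 1 in CSC
   order).  The function name is invented for illustration; the reset loop
   comes from the LIBXSMM_GEMM_FLAG_BETA_0 branch and each "+=" line
   corresponds to one (row, column) entry of i_row_idx/i_column_idx. */
static void example_emitted_kernel_2x2( const double* A, const double* B, double* C ) {
  unsigned int l_m = 0;
  unsigned int l_n = 0;
  /* beta = 0: reset C */
  for ( l_n = 0; l_n < 2; l_n++) {
    for ( l_m = 0; l_m < 2; l_m++) { C[(l_n*2)+l_m] = 0.0; }
  }
  /* one update per stored non-zero of the sparse B matrix */
  for ( l_m = 0; l_m < 2; l_m++) {
    C[0+l_m] += A[0+l_m] * B[0];
    C[2+l_m] += A[2+l_m] * B[1];
  }
}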
single.c
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#endif

int main(int argc, char **argv)
{
  int n = 9, i, a, b[n];

  for (i = 0; i < n; i++) b[i] = -1;

#ifdef _OPENMP
#pragma omp parallel
#endif
  {
#ifdef _OPENMP
#pragma omp single
#endif
    {
      printf("Enter the initialization value for a: ");
      scanf("%d", &a);
      printf("Single executed by thread %d\n", omp_get_thread_num());
    }

#ifdef _OPENMP
#pragma omp for
#endif
    for (i = 0; i < n; i++) b[i] = a;
  }

  printf("After the parallel region:\n");
  for (i = 0; i < n; i++) printf("b[%d] = %d\t", i, b[i]);
  printf("\n");
  return 0;
}
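/* A hedged variant of the program above, shown for illustration only: if one
   wanted "a" to be private to each thread rather than shared, the value read
   inside the single construct can be broadcast to every thread's private copy
   with the copyprivate clause.  The function name is invented for this
   sketch. */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

static void broadcast_with_copyprivate (void)
{
  int a = 0;
#ifdef _OPENMP
#pragma omp parallel private(a)
#endif
  {
#ifdef _OPENMP
#pragma omp single copyprivate(a)
#endif
    {
      /* one thread reads the value into its private copy */
      if (scanf("%d", &a) != 1) a = 0;
    }
    /* after the single construct every thread's private "a" holds the value */
    printf("thread sees a = %d\n", a);
  }
}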
GB_unaryop__minv_uint64_uint8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_uint64_uint8 // op(A') function: GB_tran__minv_uint64_uint8 // C type: uint64_t // A type: uint8_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 64) #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 64) ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_uint64_uint8 ( uint64_t *restrict Cx, const uint8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_uint64_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
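/* Written out by hand, each iteration of the parallel loop above expands
   GB_CAST_OP (p, p) into the per-entry work shown below.  This is a sketch
   with an invented helper name, assuming GB.h is included as in the file
   above; GB_IMINV_UNSIGNED is GraphBLAS' integer multiplicative inverse,
   which also defines a result for aij == 0 rather than trapping. */
static inline void minv_uint64_uint8_entry
(
    uint64_t *restrict Cx,       /* output array                    */
    const uint8_t *restrict Ax,  /* input array                     */
    int64_t p                    /* entry index                     */
)
{
    uint8_t aij = Ax [p] ;               /* GB_GETA: aij = Ax [pA]          */
    uint64_t x = (uint64_t) aij ;        /* GB_CASTING: typecast to uint64  */
    Cx [p] = GB_IMINV_UNSIGNED (x, 64) ; /* GB_OP: z = integer 1/x          */
}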
ordered-1.c
/* { dg-do compile } */ /* { dg-options "-fopenmp -fdump-tree-ompexp" } */ /* LLVM LOCAL test not applicable */ /* { dg-require-fdump "" } */ extern void bar(int); void foo (void) { #pragma omp ordered bar(0); #pragma omp ordered { bar(1); bar(2); } } /* { dg-final { scan-tree-dump-times "GOMP_ordered_start" 2 "ompexp" } } */ /* { dg-final { scan-tree-dump-times "GOMP_ordered_end" 2 "ompexp" } } */ /* { dg-final { cleanup-tree-dump "ompexp" } } */
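/* The dump-scan test above only checks that two GOMP_ordered_start/_end pairs are
   emitted; it does not show run-time behaviour. A minimal, hedged sketch of the
   usual usage pattern follows: the ordered clause on the loop plus an ordered
   region inside it, so the printf calls happen in iteration order even though the
   surrounding work runs in parallel. */
#include <stdio.h>
int main(void)
{
  int i, sum = 0;
  #ifdef _OPENMP
  #pragma omp parallel for ordered reduction(+:sum)
  #endif
  for (i = 0; i < 8; i++)
  {
    sum += i * i;                        /* unordered work, runs in parallel  */
    #ifdef _OPENMP
    #pragma omp ordered
    #endif
    printf("iteration %d done\n", i);    /* printed strictly in loop order    */
  }
  printf("sum = %d\n", sum);
  return 0;
}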
findpath.c
/* gcc -fopenmp -g3 -DTEST_FINDPATH findpath.c -o FINDpath -lRNA -lm -I../ -L./ */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <limits.h> #include "ViennaRNA/findpath.h" #include "ViennaRNA/datastructures/basic.h" #include "ViennaRNA/model.h" #include "ViennaRNA/params/basic.h" #include "ViennaRNA/fold.h" #include "ViennaRNA/cofold.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/utils/strings.h" #include "ViennaRNA/utils/structures.h" #ifdef _OPENMP #include <omp.h> #endif #define LOOP_EN /** * @brief */ typedef struct move { int i; /* i,j>0 insert; i,j<0 delete */ int j; int when; /* 0 if still available, else resulting distance from start */ int E; } move_t; /** * @brief */ typedef struct intermediate { short *pt; /**< @brief pair table */ int Sen; /**< @brief saddle energy so far */ int curr_en; /**< @brief current energy */ move_t *moves; /**< @brief remaining moves to target */ } intermediate_t; /* ################################# # GLOBAL VARIABLES # ################################# */ /* ################################# # PRIVATE VARIABLES # ################################# */ PRIVATE int BP_dist; PRIVATE move_t *path = NULL; PRIVATE int path_fwd; /* 1: s1->s2, else s2 -> s1 */ PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL; #ifdef _OPENMP /* NOTE: all variables are assumed to be uninitialized if they are declared as threadprivate */ #pragma omp threadprivate(BP_dist, path, path_fwd, backward_compat_compound) #endif /* ################################# # PRIVATE FUNCTION DECLARATIONS # ################################# */ PRIVATE move_t * copy_moves(move_t *mvs); PRIVATE int compare_ptable(const void *A, const void *B); PRIVATE int compare_energy(const void *A, const void *B); PRIVATE int compare_moves_when(const void *A, const void *B); PRIVATE void free_intermediate(intermediate_t *i); #ifdef TEST_FINDPATH /* TEST_FINDPATH, COFOLD */ PRIVATE void usage(void); #endif PRIVATE int find_path_once(vrna_fold_compound_t *vc, const char *s1, const char *s2, int maxl, int maxE); PRIVATE int try_moves(vrna_fold_compound_t *vc, intermediate_t c, int maxE, intermediate_t *next, int dist); /* ################################# # BEGIN OF FUNCTION DEFINITIONS # ################################# */ PUBLIC void free_path(vrna_path_t *path) { vrna_path_t *tmp = path; if (tmp) { while (tmp->s) { free(tmp->s); tmp++; } free(path); } } PUBLIC int find_saddle(const char *seq, const char *s1, const char *s2, int width) { int maxE; char *sequence; vrna_fold_compound_t *vc; vrna_md_t md, *md_p; vc = NULL; set_model_details(&md); if (backward_compat_compound) { if (!strcmp(seq, backward_compat_compound->sequence)) { /* check if sequence is the same as before */ md.window_size = backward_compat_compound->length; md.max_bp_span = backward_compat_compound->length; md_p = &(backward_compat_compound->params->model_details); if (!memcmp(&md, md_p, sizeof(vrna_md_t))) /* check if model_details are the same as before */ vc = backward_compat_compound; /* re-use previous vrna_fold_compound_t */ } } if (!vc) { vrna_fold_compound_free(backward_compat_compound); sequence = vrna_cut_point_insert(seq, cut_point); backward_compat_compound = vc = vrna_fold_compound(sequence, &md, VRNA_OPTION_EVAL_ONLY); free(sequence); } maxE = vrna_path_findpath_saddle(vc, s1, s2, width); return maxE; } PUBLIC int vrna_path_findpath_saddle(vrna_fold_compound_t *vc, const char *s1, const char *s2, int width) 
{ return vrna_path_findpath_saddle_ub(vc, s1, s2, width, INT_MAX - 1); } PUBLIC int vrna_path_findpath_saddle_ub(vrna_fold_compound_t *vc, const char *s1, const char *s2, int width, int maxE) { int maxl; const char *tmp; move_t *bestpath = NULL; int dir; path_fwd = dir = 0; maxl = 1; do { int saddleE; path_fwd = !path_fwd; if (maxl > width) maxl = width; if (path) free(path); saddleE = find_path_once(vc, s1, s2, maxl, maxE); if (saddleE < maxE) { maxE = saddleE; if (bestpath) free(bestpath); bestpath = path; path = NULL; dir = path_fwd; } else { free(path); path = NULL; } tmp = s1; s1 = s2; s2 = tmp; maxl *= 2; } while (maxl < 2 * width); /* (re)set some globals */ path = bestpath; path_fwd = dir; return maxE; } PUBLIC vrna_path_t * get_path(const char *seq, const char *s1, const char *s2, int maxkeep) { vrna_path_t *route = NULL; char *sequence = NULL; vrna_fold_compound_t *vc = NULL; vrna_md_t md, *md_p; set_model_details(&md); if (backward_compat_compound) { if (!strcmp(seq, backward_compat_compound->sequence)) { /* check if sequence is the same as before */ md.window_size = backward_compat_compound->length; md.max_bp_span = backward_compat_compound->length; md_p = &(backward_compat_compound->params->model_details); if (!memcmp(&md, md_p, sizeof(vrna_md_t))) /* check if model_details are the same as before */ vc = backward_compat_compound; /* re-use previous vrna_fold_compound_t */ } } if (!vc) { vrna_fold_compound_free(backward_compat_compound); sequence = vrna_cut_point_insert(seq, cut_point); backward_compat_compound = vc = vrna_fold_compound(sequence, &md, VRNA_OPTION_EVAL_ONLY); free(sequence); } route = vrna_path_findpath(vc, s1, s2, maxkeep); return route; } PUBLIC vrna_path_t * vrna_path_findpath(vrna_fold_compound_t *vc, const char *s1, const char *s2, int width) { return vrna_path_findpath_ub(vc, s1, s2, width, INT_MAX - 1); } PUBLIC vrna_path_t * vrna_path_findpath_ub(vrna_fold_compound_t *vc, const char *s1, const char *s2, int width, int maxE) { int E, d; vrna_path_t *route = NULL; E = vrna_path_findpath_saddle_ub(vc, s1, s2, width, maxE); /* did we find a better path than one with saddle maxE? 
*/ if (E < maxE) { route = (vrna_path_t *)vrna_alloc((BP_dist + 2) * sizeof(vrna_path_t)); qsort(path, BP_dist, sizeof(move_t), compare_moves_when); if (path_fwd) { /* memorize start of path */ route[0].s = strdup(s1); route[0].en = vrna_eval_structure(vc, s1); for (d = 0; d < BP_dist; d++) { int i, j; route[d + 1].s = strdup(route[d].s); i = path[d].i; j = path[d].j; if (i < 0) { /* delete */ route[d + 1].s[(-i) - 1] = route[d + 1].s[(-j) - 1] = '.'; } else { route[d + 1].s[i - 1] = '('; route[d + 1].s[j - 1] = ')'; } route[d + 1].en = path[d].E / 100.0; } } else { /* memorize start of path */ route[BP_dist].s = strdup(s2); route[BP_dist].en = vrna_eval_structure(vc, s2); for (d = 0; d < BP_dist; d++) { int i, j; route[BP_dist - d - 1].s = strdup(route[BP_dist - d].s); i = path[d].i; j = path[d].j; if (i < 0) { /* delete */ route[BP_dist - d - 1].s[(-i) - 1] = route[BP_dist - d - 1].s[(-j) - 1] = '.'; } else { route[BP_dist - d - 1].s[i - 1] = '('; route[BP_dist - d - 1].s[j - 1] = ')'; } route[BP_dist - d - 1].en = path[d].E / 100.0; } } #if _DEBUG_FINDPATH_ fprintf(stderr, "\n%s\n%s\n%s\n\n", seq, s1, s2); for (d = 0; d <= BP_dist; d++) fprintf(stderr, "%s %6.2f\n", route[d].s, route[d].en); fprintf(stderr, "%d\n", *num_entry); #endif } free(path); path = NULL; return route; } PRIVATE int try_moves(vrna_fold_compound_t *vc, intermediate_t c, int maxE, intermediate_t *next, int dist) { int *loopidx, len, num_next = 0, en, oldE; move_t *mv; short *pt; len = c.pt[0]; loopidx = vrna_loopidx_from_ptable(c.pt); oldE = c.Sen; for (mv = c.moves; mv->i != 0; mv++) { int i, j; if (mv->when > 0) continue; i = mv->i; j = mv->j; pt = (short *)vrna_alloc(sizeof(short) * (len + 1)); memcpy(pt, c.pt, (len + 1) * sizeof(short)); if (j < 0) { /*it's a delete move */ pt[-i] = 0; pt[-j] = 0; } else { /* insert move */ if ((loopidx[i] == loopidx[j]) && /* i and j belong to same loop */ (pt[i] == 0) && (pt[j] == 0) /* ... and are unpaired */ ) { pt[i] = j; pt[j] = i; } else { free(pt); continue; /* illegal move, try next; */ } } #ifdef LOOP_EN en = c.curr_en + vrna_eval_move_pt(vc, c.pt, i, j); #else en = vrna_eval_structure_pt(vc, pt); #endif if (en < maxE) { next[num_next].Sen = (en > oldE) ?
en : oldE; next[num_next].curr_en = en; next[num_next].pt = pt; mv->when = dist; mv->E = en; next[num_next++].moves = copy_moves(c.moves); mv->when = 0; } else { free(pt); } } free(loopidx); return num_next; } PRIVATE int find_path_once(vrna_fold_compound_t *vc, const char *s1, const char *s2, int maxl, int maxE) { short *pt1, *pt2; move_t *mlist; int i, len, d, dist = 0, result; intermediate_t *current, *next; pt1 = vrna_ptable(s1); pt2 = vrna_ptable(s2); len = (int)strlen(s1); mlist = (move_t *)vrna_alloc(sizeof(move_t) * len); /* bp_dist < n */ for (i = 1; i <= len; i++) { if (pt1[i] != pt2[i]) { if (i < pt1[i]) { /* need to delete this pair */ mlist[dist].i = -i; mlist[dist].j = -pt1[i]; mlist[dist++].when = 0; } if (i < pt2[i]) { /* need to insert this pair */ mlist[dist].i = i; mlist[dist].j = pt2[i]; mlist[dist++].when = 0; } } } free(pt2); BP_dist = dist; current = (intermediate_t *)vrna_alloc(sizeof(intermediate_t) * (maxl + 1)); current[0].pt = pt1; current[0].Sen = current[0].curr_en = vrna_eval_structure_pt(vc, pt1); current[0].moves = mlist; next = (intermediate_t *)vrna_alloc(sizeof(intermediate_t) * (dist * maxl + 1)); for (d = 1; d <= dist; d++) { /* go through the distance classes */ int c, u, num_next = 0; intermediate_t *cc; for (c = 0; current[c].pt != NULL; c++) num_next += try_moves(vc, current[c], maxE, next + num_next, d); if (num_next == 0) { for (cc = current; cc->pt != NULL; cc++) free_intermediate(cc); current[0].Sen = INT_MAX; break; } /* remove duplicates via sort|uniq * if this becomes a bottleneck we can use a hash instead */ qsort(next, num_next, sizeof(intermediate_t), compare_ptable); for (u = 0, c = 1; c < num_next; c++) { if (memcmp(next[u].pt, next[c].pt, sizeof(short) * len) != 0) next[++u] = next[c]; else free_intermediate(next + c); } num_next = u + 1; qsort(next, num_next, sizeof(intermediate_t), compare_energy); /* free the old stuff */ for (cc = current; cc->pt != NULL; cc++) free_intermediate(cc); for (u = 0; u < maxl && u < num_next; u++) current[u] = next[u]; for (; u < num_next; u++) free_intermediate(next + u); num_next = 0; } free(next); path = current[0].moves; result = current[0].Sen; free(current[0].pt); free(current); return result; } PRIVATE void free_intermediate(intermediate_t *i) { free(i->pt); free(i->moves); i->pt = NULL; i->moves = NULL; i->Sen = INT_MAX; } PRIVATE int compare_ptable(const void *A, const void *B) { intermediate_t *a, *b; int c; a = (intermediate_t *)A; b = (intermediate_t *)B; c = memcmp(a->pt, b->pt, a->pt[0] * sizeof(short)); if (c != 0) return c; if ((a->Sen - b->Sen) != 0) return a->Sen - b->Sen; return a->curr_en - b->curr_en; } PRIVATE int compare_energy(const void *A, const void *B) { intermediate_t *a, *b; a = (intermediate_t *)A; b = (intermediate_t *)B; if ((a->Sen - b->Sen) != 0) return a->Sen - b->Sen; return a->curr_en - b->curr_en; } PRIVATE int compare_moves_when(const void *A, const void *B) { move_t *a, *b; a = (move_t *)A; b = (move_t *)B; return a->when - b->when; } PRIVATE move_t * copy_moves(move_t *mvs) { move_t *new; new = (move_t *)vrna_alloc(sizeof(move_t) * (BP_dist + 1)); memcpy(new, mvs, sizeof(move_t) * (BP_dist + 1)); return new; } #ifdef TEST_FINDPATH PUBLIC void print_path(const char *seq, const char *struc) { int d; char *s; s = strdup(struc); if (cut_point == -1) { printf("%s\n%s\n", seq, s); } /* printf("%s\n%s %6.2f\n", seq, s, vrna_eval_structure_simple(seq,s)); */ else { char *pstruct, *pseq; pstruct = vrna_cut_point_insert(s, cut_point); pseq = vrna_cut_point_insert(seq, 
cut_point); printf("%s\n%s\n", pseq, pstruct); /* printf("%s\n%s %6.2f\n", pseq, pstruct, vrna_eval_structure_simple(seq,s)); */ free(pstruct); free(pseq); } qsort(path, BP_dist, sizeof(move_t), compare_moves_when); for (d = 0; d < BP_dist; d++) { int i, j; i = path[d].i; j = path[d].j; if (i < 0) { /* delete */ s[(-i) - 1] = s[(-j) - 1] = '.'; } else { s[i - 1] = '('; s[j - 1] = ')'; } /* printf("%s %6.2f - %6.2f\n", s, vrna_eval_structure_simple(seq,s), path[d].E/100.0); */ } free(s); } int main(int argc, char *argv[]) { char *line, *seq, *s1, *s2; int E, maxkeep = 1000; int verbose = 0, i; vrna_path_t *route, *r; for (i = 1; i < argc; i++) { switch (argv[i][1]) { case 'm': if (strcmp(argv[i], "-m") == 0) sscanf(argv[++i], "%d", &maxkeep); break; case 'v': verbose = !strcmp(argv[i], "-v"); break; case 'd': if (strcmp(argv[i], "-d") == 0) sscanf(argv[++i], "%d", &dangles); break; default: usage(); } } cut_point = -1; line = vrna_read_line(stdin); seq = vrna_cut_point_remove(line, &cut_point); free(line); line = vrna_read_line(stdin); s1 = vrna_cut_point_remove(line, &cut_point); free(line); line = vrna_read_line(stdin); s2 = vrna_cut_point_remove(line, &cut_point); free(line); E = find_saddle(seq, s1, s2, maxkeep); printf("saddle_energy = %6.2f\n", E / 100.); if (verbose) { if (path_fwd) print_path(seq, s1); else print_path(seq, s2); free(path); path = NULL; route = get_path(seq, s1, s2, maxkeep); for (r = route; r->s; r++) { if (cut_point == -1) { printf("%s %6.2f\n", r->s, r->en); /* printf("%s %6.2f - %6.2f\n", r->s, vrna_eval_structure_simple(seq,r->s), r->en); */ } else { char *pstruct; pstruct = vrna_cut_point_insert(r->s, cut_point); printf("%s %6.2f\n", pstruct, r->en); /* printf("%s %6.2f - %6.2f\n", pstruct, vrna_eval_structure_simple(seq,r->s), r->en); */ free(pstruct); } free(r->s); } free(route); } free(seq); free(s1); free(s2); return EXIT_SUCCESS; } static void usage(void) { vrna_message_error("usage: findpath.c [-m depth] [-d[0|1|2]] [-v]"); } #endif
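/* Condensed usage sketch for the public entry points defined above (find_saddle,
   get_path, free_path), assuming the ViennaRNA headers and library are available
   as in the compile line at the top of findpath.c. The sequence and structures are
   small made-up placeholders; find_saddle returns energies in units of 0.01
   kcal/mol, hence the division by 100. */
#include <stdio.h>
#include "ViennaRNA/findpath.h"

static void findpath_usage_sketch(void)
{
  const char *seq = "GGGGAAAACCCC";   /* toy hairpin sequence (placeholder)       */
  const char *s1  = "((((....))))";   /* start: closed hairpin                    */
  const char *s2  = "............";   /* target: open chain                       */
  int width = 10;                     /* search width, same role as the -m option */

  int saddle = find_saddle(seq, s1, s2, width);
  printf("saddle energy = %6.2f\n", saddle / 100.);

  vrna_path_t *route = get_path(seq, s1, s2, width), *r;
  for (r = route; r->s; r++)
    printf("%s %6.2f\n", r->s, r->en);
  free_path(route);
}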
AzureAD_fmt_plug.c
/* * This software is Copyright (c) 2015 JimF, <jfoug at openwall.com>, and * it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * Azure ActiveDirectory, V1 cracker patch for JtR. * * Algorithm: https://www.dsinternals.com/en/how-azure-active-directory-connect-syncs-passwords/ * * PBKDF2(UTF-16(uc(hex(MD4(UTF-16(password))))), rnd_salt(10), 100, HMAC-SHA256, 32) */ #if FMT_EXTERNS_H extern struct fmt_main fmt_AzureAD; #elif FMT_REGISTERS_H john_register_one(&fmt_AzureAD); #else #include <string.h> #include "arch.h" #include "md4.h" #include "pbkdf2_hmac_sha256.h" #include "common.h" #include "formats.h" #include "base64_convert.h" #include "AzureAD_common.h" #include "unicode.h" #include "johnswap.h" //#undef SIMD_COEF_32 //#undef SIMD_PARA_SHA256 #ifdef _OPENMP #ifdef SIMD_COEF_32 #ifndef OMP_SCALE #define OMP_SCALE 64 // FIXME #endif #else #ifndef OMP_SCALE #define OMP_SCALE 64 // FIXME #endif #endif #include <omp.h> #endif #include "simd-intrinsics.h" #include "memdbg.h" #define FORMAT_LABEL "AzureAD" #define FORMAT_NAME "" #define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME #ifdef SIMD_COEF_32 #define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA256) #else #define NBKEYS 1 #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define BINARY_SIZE DIGEST_SIZE #define BINARY_ALIGN 4 // For now, I will do md4() oSSL type for all passwords. There is so much // other overhead that adding the complexity to do SIMD md4 will gain us // almost nothing #define PLAINTEXT_LENGTH 125 #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static char (*saved_nt)[64]; static int dirty; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*saved_key), MEM_ALIGN_WORD); saved_nt = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*saved_nt), MEM_ALIGN_WORD); crypt_out = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*crypt_out), MEM_ALIGN_WORD); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_nt); MEM_FREE(saved_key); } static void *salt(char *ciphertext) { char Buf[120], *ctcopy=Buf; char *p; static struct AzureAD_custom_salt cs; memset(&cs, 0, sizeof(cs)); strncpy(Buf, ciphertext, 119); Buf[119] = 0; ctcopy += TAG_LENGTH; p = strtokm(ctcopy, ","); cs.salt_len = strlen(p)/2; base64_convert(p, e_b64_hex, cs.salt_len*2, cs.salt, e_b64_raw, cs.salt_len, 0, 0); p = strtokm(NULL, ","); cs.iterations = atoi(p); p = strtokm(Buf, ","); strncpy(cs.version, p, 8); cs.version[7] = 0; return (void *)&cs; } static void set_salt(void *salt) { AzureAD_cur_salt = (struct AzureAD_custom_salt *)salt; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & 
PH_MASK_6; } static void set_key(char *key, int index) { strcpy(saved_key[index], key); dirty = 1; } static char *get_key(int index) { return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { // * PBKDF2(UTF-16(uc(hex(MD4(UTF-16(password))))), rnd_salt(10), 100, HMAC-SHA256, 32) // Trivial for now. Can optimized later. UTF16 Buf[PLAINTEXT_LENGTH+1]; unsigned char hash[16], hex[33]; int len, cnt, i; MD4_CTX ctx; #ifdef SIMD_COEF_32 int lens[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT]; union { uint32_t *pout[MAX_KEYS_PER_CRYPT]; unsigned char *poutc; } x; cnt = MAX_KEYS_PER_CRYPT; #else cnt = 1; #endif if (dirty) for (i = 0; i < cnt; ++i) { len = enc_to_utf16(Buf, PLAINTEXT_LENGTH, (UTF8*)saved_key[index+i], strlen(saved_key[index+i])); if (len < 0) len = 0; MD4_Init(&ctx); MD4_Update(&ctx, Buf, len*2); MD4_Final(hash, &ctx); base64_convert(hash, e_b64_raw, 16, hex, e_b64_hex, sizeof(hex), flg_Base64_HEX_UPCASE, 0); for (len = 0; len < 32; ++len) saved_nt[index+i][len<<1] = hex[len]; } #ifdef SIMD_COEF_32 for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { lens[i] = 64; pin[i] = (unsigned char*)saved_nt[i+index]; x.pout[i] = crypt_out[i+index]; } pbkdf2_sha256_sse((const unsigned char **)pin, lens, AzureAD_cur_salt->salt, AzureAD_cur_salt->salt_len, AzureAD_cur_salt->iterations, &(x.poutc), 32, 0); #else pbkdf2_sha256((unsigned char *)saved_nt[index], 64, AzureAD_cur_salt->salt, AzureAD_cur_salt->salt_len, AzureAD_cur_salt->iterations, (unsigned char*)crypt_out[index], 32, 0); #endif } dirty = 0; return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], 4)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_AzureAD = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #ifdef _OPENMP FMT_OMP | #endif FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_UNICODE | FMT_UTF8, { NULL }, { FORMAT_TAG }, AzureAD_common_tests }, { init, done, fmt_default_reset, fmt_default_prepare, AzureAD_common_valid, AzureAD_common_split, AzureAD_common_get_binary, salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
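/* Standalone sketch of the hash pipeline documented in the header comment above,
   PBKDF2(UTF-16(uc(hex(MD4(UTF-16(password))))), salt, iterations, HMAC-SHA256, 32),
   written against OpenSSL rather than JtR's internal primitives (compile with
   -lcrypto). Assumptions: the password is plain ASCII and at most 125 bytes (so
   UTF-16LE is just byte + 0x00, matching PLAINTEXT_LENGTH above), and OpenSSL's
   MD4 is available (it is deprecated in OpenSSL 3.x). This is an illustration of
   the algorithm, not the format's reference code. */
#include <stdio.h>
#include <string.h>
#include <openssl/md4.h>
#include <openssl/evp.h>

static void azuread_hash_sketch(const char *password,
                                const unsigned char *salt, int salt_len,
                                int iterations, unsigned char out[32])
{
  static const char hexchars[] = "0123456789ABCDEF";
  unsigned char utf16_pw[2 * 125], md4_digest[16], utf16_hex[64];
  int i, len = (int)strlen(password);
  if (len > 125) len = 125;

  /* 1. UTF-16LE encode the (ASCII) password */
  for (i = 0; i < len; i++) { utf16_pw[2*i] = (unsigned char)password[i]; utf16_pw[2*i+1] = 0; }

  /* 2. MD4 of the UTF-16LE password (the classic NT hash) */
  MD4(utf16_pw, 2 * (size_t)len, md4_digest);

  /* 3. upper-case hex of the digest, then UTF-16LE encode those 32 characters */
  for (i = 0; i < 16; i++) {
    utf16_hex[4*i]     = (unsigned char)hexchars[md4_digest[i] >> 4];
    utf16_hex[4*i + 1] = 0;
    utf16_hex[4*i + 2] = (unsigned char)hexchars[md4_digest[i] & 0x0f];
    utf16_hex[4*i + 3] = 0;
  }

  /* 4. PBKDF2-HMAC-SHA256 over the 64-byte UTF-16LE hex string */
  PKCS5_PBKDF2_HMAC((const char *)utf16_hex, 64, salt, salt_len,
                    iterations, EVP_sha256(), 32, out);
}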
ps.c
/*** Some useful math macros ***/ #define SIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a)) static double mnarg1,mnarg2; #define FMAX(a,b) (mnarg1=(a),mnarg2=(b),(mnarg1) > (mnarg2) ?\ (mnarg1) : (mnarg2)) static double mnarg1,mnarg2; #define FMIN(a,b) (mnarg1=(a),mnarg2=(b),(mnarg1) < (mnarg2) ?\ (mnarg1) : (mnarg2)) #define ERFC_NPTS (int) 75 #define ERFC_PARAM_DELTA (float) 0.1 static double log_erfc_table[ERFC_NPTS], erfc_params[ERFC_NPTS]; static gsl_interp_accel *erfc_acc; static gsl_spline *erfc_spline; #define NGaussLegendre 40 //defines the number of points in the Gauss-Legendre quadrature integration #define NMass 300 #define NSFR_high 200 #define NSFR_low 250 #define NGL_SFR 100 // 100 #define NMTURN 50//100 #define LOG10_MTURN_MAX ((double)(10)) #define LOG10_MTURN_MIN ((double)(5.-9e-8)) #define NR_END 1 #define FREE_ARG char* #define MM 7 #define NSTACK 50 #define EPS2 3.0e-11 #define Luv_over_SFR (double)(1./1.15/1e-28) // Luv/SFR = 1 / 1.15 x 10^-28 [M_solar yr^-1/erg s^-1 Hz^-1] // G. Sun and S. R. Furlanetto (2016) MNRAS, 417, 33 #define delta_lnMhalo (double)(5e-6) #define Mhalo_min (double)(1e6) #define Mhalo_max (double)(1e16) float calibrated_NF_min; double *deltaz, *deltaz_smoothed, *NeutralFractions, *z_Q, *Q_value, *nf_vals, *z_vals; int N_NFsamples,N_extrapolated, N_analytic, N_calibrated, N_deltaz; bool initialised_ComputeLF = false; gsl_interp_accel *LF_spline_acc; gsl_spline *LF_spline; gsl_interp_accel *deriv_spline_acc; gsl_spline *deriv_spline; struct CosmoParams *cosmo_params_ps; struct UserParams *user_params_ps; struct FlagOptions *flag_options_ps; //double sigma_norm, R, theta_cmb, omhh, z_equality, y_d, sound_horizon, alpha_nu, f_nu, f_baryon, beta_c, d2fact, R_CUTOFF, DEL_CURR, SIG_CURR; double sigma_norm, theta_cmb, omhh, z_equality, y_d, sound_horizon, alpha_nu, f_nu, f_baryon, beta_c, d2fact, R_CUTOFF, DEL_CURR, SIG_CURR; float MinMass, mass_bin_width, inv_mass_bin_width; double sigmaparam_FgtrM_bias(float z, float sigsmallR, float del_bias, float sig_bias); float *Mass_InterpTable, *Sigma_InterpTable, *dSigmadm_InterpTable; float *log10_overdense_spline_SFR, *log10_Nion_spline, *Overdense_spline_SFR, *Nion_spline; float *prev_log10_overdense_spline_SFR, *prev_log10_Nion_spline, *prev_Overdense_spline_SFR, *prev_Nion_spline; float *Mturns, *Mturns_MINI; float *log10_Nion_spline_MINI, *Nion_spline_MINI; float *prev_log10_Nion_spline_MINI, *prev_Nion_spline_MINI; float *xi_SFR,*wi_SFR, *xi_SFR_Xray, *wi_SFR_Xray; float *overdense_high_table, *overdense_low_table, *log10_overdense_low_table; float **log10_SFRD_z_low_table, **SFRD_z_high_table; float **log10_SFRD_z_low_table_MINI, **SFRD_z_high_table_MINI; double *lnMhalo_param, *Muv_param, *Mhalo_param; double *log10phi, *M_uv_z, *M_h_z; double *lnMhalo_param_MINI, *Muv_param_MINI, *Mhalo_param_MINI; double *log10phi_MINI, *M_uv_z_MINI, *M_h_z_MINI; double *deriv, *lnM_temp, *deriv_temp; double *z_val, *z_X_val, *Nion_z_val, *SFRD_val; double *Nion_z_val_MINI, *SFRD_val_MINI; void initialiseSigmaMInterpTable(float M_Min, float M_Max); void freeSigmaMInterpTable(); void initialiseGL_Nion(int n, float M_Min, float M_Max); void initialiseGL_Nion_Xray(int n, float M_Min, float M_Max); float Mass_limit (float logM, float PL, float FRAC); void bisection(float *x, float xlow, float xup, int *iter); float Mass_limit_bisection(float Mmin, float Mmax, float PL, float FRAC); double sheth_delc(double del, double sig); float dNdM_conditional(float growthf, float M1, float M2, float delta1, float delta2, float sigma2);
double dNion_ConditionallnM(double lnM, void *params); double Nion_ConditionalM(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES); double dNion_ConditionallnM_MINI(double lnM, void *params); double Nion_ConditionalM_MINI(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double MassTurnover_upper, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES); float GaussLegendreQuad_Nion(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, bool FAST_FCOLL_TABLES); float GaussLegendreQuad_Nion_MINI(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float MassTurnover_upper, float Alpha_star, float Alpha_esc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES); //JBM: Exact integral for power-law indices non zero (for zero it's erfc) double Fcollapprox (double numin, double beta); int n_redshifts_1DTable; double zmin_1DTable, zmax_1DTable, zbin_width_1DTable; double *FgtrM_1DTable_linear; static gsl_interp_accel *Q_at_z_spline_acc; static gsl_spline *Q_at_z_spline; static gsl_interp_accel *z_at_Q_spline_acc; static gsl_spline *z_at_Q_spline; static double Zmin, Zmax, Qmin, Qmax; void Q_at_z(double z, double *splined_value); void z_at_Q(double Q, double *splined_value); static gsl_interp_accel *deltaz_spline_for_photoncons_acc; static gsl_spline *deltaz_spline_for_photoncons; static gsl_interp_accel *NFHistory_spline_acc; static gsl_spline *NFHistory_spline; static gsl_interp_accel *z_NFHistory_spline_acc; static gsl_spline *z_NFHistory_spline; void initialise_NFHistory_spline(double *redshifts, double *NF_estimate, int NSpline); void z_at_NFHist(double xHI_Hist, double *splined_value); void NFHist_at_z(double z, double *splined_value); //int nbin; //double *z_Q, *Q_value, *Q_z, *z_value; double FinalNF_Estimate, FirstNF_Estimate; struct parameters_gsl_FgtrM_int_{ double z_obs; double gf_obs; }; struct parameters_gsl_SFR_General_int_{ double z_obs; double gf_obs; double Mdrop; double Mdrop_upper; double pl_star; double pl_esc; double frac_star; double frac_esc; double LimitMass_Fstar; double LimitMass_Fesc; }; struct parameters_gsl_SFR_con_int_{ double gf_obs; double Mval; double sigma2; double delta1; double delta2; double Mdrop; double Mdrop_upper; double pl_star; double pl_esc; double frac_star; double frac_esc; double LimitMass_Fstar; double LimitMass_Fesc; }; unsigned long *lvector(long nl, long nh); void free_lvector(unsigned long *v, long nl, long nh); float *vector(long nl, long nh); void free_vector(float *v, long nl, long nh); void spline(float x[], float y[], int n, float yp1, float ypn, float y2[]); void splint(float xa[], float ya[], float y2a[], int n, float x, float *y); void gauleg(float x1, float x2, float x[], float w[], int n); /***** FUNCTION PROTOTYPES *****/ double init_ps(); /* initialize global variables, MUST CALL THIS FIRST!!! 
returns R_CUTOFF */ void free_ps(); /* deallocates the gsl structures from init_ps */ double sigma_z0(double M); //calculates sigma at z=0 (no dicke) double power_in_k(double k); /* Returns the value of the linear power spectrum density (i.e. <|delta_k|^2>/V) at a given k mode at z=0 */ double TFmdm(double k); //Eisenstein & Hu power spectrum transfer function void TFset_parameters(); double TF_CLASS(double k, int flag_int, int flag_dv); //transfer function of matter (flag_dv=0) and relative velocities (flag_dv=1) fluctuations from CLASS double power_in_vcb(double k); /* Returns the value of the DM-b relative velocity power spectrum density (i.e. <|delta_k|^2>/V) at a given k mode at z=0 */ double FgtrM(double z, double M); double FgtrM_wsigma(double z, double sig); double FgtrM_st(double z, double M); double FgtrM_Watson(double growthf, double M); double FgtrM_Watson_z(double z, double growthf, double M); double FgtrM_General(double z, double M); float erfcc(float x); double splined_erfc(double x); double M_J_WDM(); void Broadcast_struct_global_PS(struct UserParams *user_params, struct CosmoParams *cosmo_params){ cosmo_params_ps = cosmo_params; user_params_ps = user_params; } /* this function reads the z=0 matter (CDM+baryons) and relative velocity transfer functions from CLASS (from a file) flag_int = 0 to initialize interpolator, flag_int = -1 to free memory, flag_int = else to interpolate. flag_dv = 0 to output density, flag_dv = 1 to output velocity. similar to built-in function "double T_RECFAST(float z, int flag)" */ double TF_CLASS(double k, int flag_int, int flag_dv) { static double kclass[CLASS_LENGTH], Tmclass[CLASS_LENGTH], Tvclass_vcb[CLASS_LENGTH]; static gsl_interp_accel *acc_density, *acc_vcb; static gsl_spline *spline_density, *spline_vcb; float trash, currk, currTm, currTv; double ans; int i; int gsl_status; FILE *F; char filename[500]; sprintf(filename,"%s/%s",global_params.external_table_path,CLASS_FILENAME); if (flag_int == 0) { // Initialize vectors and read file if (!(F = fopen(filename, "r"))) { LOG_ERROR("Unable to open file: %s for reading.", filename); Throw(IOError); } int nscans; for (i = 0; i < CLASS_LENGTH; i++) { nscans = fscanf(F, "%e %e %e ", &currk, &currTm, &currTv); if (nscans != 3) { LOG_ERROR("Reading CLASS Transfer Function failed."); Throw(IOError); } kclass[i] = currk; Tmclass[i] = currTm; Tvclass_vcb[i] = currTv; if (i > 0 && kclass[i] <= kclass[i - 1]) { LOG_WARNING("Tk table not ordered"); LOG_WARNING("k=%.1le kprev=%.1le", kclass[i], kclass[i - 1]); } } fclose(F); LOG_SUPER_DEBUG("Read CLASS Transfer file"); gsl_set_error_handler_off(); // Set up spline table for densities acc_density = gsl_interp_accel_alloc (); spline_density = gsl_spline_alloc (gsl_interp_cspline, CLASS_LENGTH); gsl_status = gsl_spline_init(spline_density, kclass, Tmclass, CLASS_LENGTH); GSL_ERROR(gsl_status); LOG_SUPER_DEBUG("Generated CLASS Density Spline."); //Set up spline table for velocities acc_vcb = gsl_interp_accel_alloc (); spline_vcb = gsl_spline_alloc (gsl_interp_cspline, CLASS_LENGTH); gsl_status = gsl_spline_init(spline_vcb, kclass, Tvclass_vcb, CLASS_LENGTH); GSL_ERROR(gsl_status); LOG_SUPER_DEBUG("Generated CLASS velocity Spline."); return 0; } else if (flag_int == -1) { gsl_spline_free (spline_density); gsl_interp_accel_free(acc_density); gsl_spline_free (spline_vcb); gsl_interp_accel_free(acc_vcb); return 0; } if (k > kclass[CLASS_LENGTH-1]) { // k>kmax LOG_WARNING("Called TF_CLASS with k=%f, larger than kmax! 
Returning value at kmax.", k); if(flag_dv == 0){ // output is density return (Tmclass[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]); } else if(flag_dv == 1){ // output is rel velocity return (Tvclass_vcb[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]); } //we just set it to the last value, since sometimes it wants large k for R<<cell_size, which does not matter much. } else { // Do spline if(flag_dv == 0){ // output is density ans = gsl_spline_eval (spline_density, k, acc_density); } else if(flag_dv == 1){ // output is relative velocity ans = gsl_spline_eval (spline_vcb, k, acc_vcb); } else{ ans=0.0; //neither densities nor velocities? } } return ans/k/k; //we have to divide by k^2 to agree with the old-fashioned convention. } // FUNCTION sigma_z0(M) // Returns the standard deviation of the normalized, density excess (delta(x)) field, // smoothed on the comoving scale of M (see filter definitions for M<->R conversion). // The sigma is evaluated at z=0, with the time evolution contained in the dicke(z) factor, // i.e. sigma(M,z) = sigma_z0(m) * dicke(z) // normalized so that sigma_z0(M->8/h Mpc) = SIGMA8 in ../Parameter_files/COSMOLOGY.H // NOTE: volume is normalized to = 1, so this is equivalent to the mass standard deviation // M is in solar masses // References: Padmanabhan, pg. 210, eq. 5.107 double dsigma_dk(double k, void *params){ double p, w, T, gamma, q, aa, bb, cc, kR; // get the power spectrum.. choice of 5: if (user_params_ps->POWER_SPECTRUM == 0){ // Eisenstein & Hu T = TFmdm(k); // check if we should cuttoff power spectrum according to Bode et al. 2000 transfer function if (global_params.P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v); p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; } else if (user_params_ps->POWER_SPECTRUM == 1){ // BBKS gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm)); q = k / (cosmo_params_ps->hlittle*gamma); T = (log(1.0+2.34*q)/(2.34*q)) * pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25); p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; } else if (user_params_ps->POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992) gamma = 0.25; aa = 6.4/(cosmo_params_ps->hlittle*gamma); bb = 3.0/(cosmo_params_ps->hlittle*gamma); cc = 1.7/(cosmo_params_ps->hlittle*gamma); p = pow(k, cosmo_params_ps->POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 ); } else if (user_params_ps->POWER_SPECTRUM == 3){ // Peebles, pg. 626 gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm)); aa = 8.0 / (cosmo_params_ps->hlittle*gamma); bb = 4.7 / pow(cosmo_params_ps->hlittle*gamma, 2); p = pow(k, cosmo_params_ps->POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2); } else if (user_params_ps->POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52 gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm)); aa = 1.7/(cosmo_params_ps->hlittle*gamma); bb = 9.0/pow(cosmo_params_ps->hlittle*gamma, 1.5); cc = 1.0/pow(cosmo_params_ps->hlittle*gamma, 2); p = pow(k, cosmo_params_ps->POWER_INDEX) * 19400.0 / pow(1 + aa*k + bb*pow(k, 1.5) + cc*k*k, 2); } else if (user_params_ps->POWER_SPECTRUM == 5){ // output of CLASS T = TF_CLASS(k, 1, 0); //read from z=0 output of CLASS.
Note, flag_int = 1 here always, since now we have to have initialized the interpolator for CLASS p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; } else{ LOG_ERROR("No such power spectrum defined: %i. Output is bogus.", user_params_ps->POWER_SPECTRUM); Throw(ValueError); } double Radius; Radius = *(double *)params; kR = k*Radius; if ( (global_params.FILTER == 0) || (sigma_norm < 0) ){ // top hat if ( (kR) < 1.0e-4 ){ w = 1.0;} // w converges to 1 as (kR) -> 0 else { w = 3.0 * (sin(kR)/pow(kR, 3) - cos(kR)/pow(kR, 2));} } else if (global_params.FILTER == 1){ // gaussian of width 1/R w = pow(E, -kR*kR/2.0); } else { LOG_ERROR("No such filter: %i. Output is bogus.", global_params.FILTER); Throw(ValueError); } return k*k*p*w*w; } double sigma_z0(double M){ double result, error, lower_limit, upper_limit; gsl_function F; double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000); double kstart, kend; double Radius; // R = MtoR(M); Radius = MtoR(M); // now lets do the integral for sigma and scale it with sigma_norm if(user_params_ps->POWER_SPECTRUM == 5){ kstart = FMAX(1.0e-99/Radius, KBOT_CLASS); kend = FMIN(350.0/Radius, KTOP_CLASS); }//we establish a maximum k of KTOP_CLASS~1e3 Mpc-1 and a minimum at KBOT_CLASS,~1e-5 Mpc-1 since the CLASS transfer function has a max! else{ kstart = 1.0e-99/Radius; kend = 350.0/Radius; } lower_limit = kstart;//log(kstart); upper_limit = kend;//log(kend); F.function = &dsigma_dk; F.params = &Radius; //gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS61, w, &result, &error); // gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS41, w, &result, &error); gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS15, w, &result, &error); gsl_integration_workspace_free (w); return sigma_norm * sqrt(result); } // FUNCTION TFmdm is the power spectrum transfer function from Eisenstein & Hu ApJ, 1999, 511, 5 double TFmdm(double k){ double q, gamma_eff, q_eff, TF_m, q_nu; q = k*pow(theta_cmb,2)/omhh; gamma_eff=sqrt(alpha_nu) + (1.0-sqrt(alpha_nu))/(1.0+pow(0.43*k*sound_horizon, 4)); q_eff = q/gamma_eff; TF_m= log(E+1.84*beta_c*sqrt(alpha_nu)*q_eff); TF_m /= TF_m + pow(q_eff,2) * (14.4 + 325.0/(1.0+60.5*pow(q_eff,1.11))); q_nu = 3.92*q/sqrt(f_nu/N_nu); TF_m *= 1.0 + (1.2*pow(f_nu,0.64)*pow(N_nu,0.3+0.6*f_nu)) / (pow(q_nu,-1.6)+pow(q_nu,0.8)); return TF_m; } void TFset_parameters(){ double z_drag, R_drag, R_equality, p_c, p_cb, f_c, f_cb, f_nub, k_equality; LOG_DEBUG("Setting Transfer Function parameters."); z_equality = 25000*omhh*pow(theta_cmb, -4) - 1.0; k_equality = 0.0746*omhh/(theta_cmb*theta_cmb); z_drag = 0.313*pow(omhh,-0.419) * (1 + 0.607*pow(omhh, 0.674)); z_drag = 1 + z_drag*pow(cosmo_params_ps->OMb*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle, 0.238*pow(omhh, 0.223)); z_drag *= 1291 * pow(omhh, 0.251) / (1 + 0.659*pow(omhh, 0.828)); y_d = (1 + z_equality) / (1.0 + z_drag); R_drag = 31.5 * cosmo_params_ps->OMb*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle * pow(theta_cmb, -4) * 1000 / (1.0 + z_drag); R_equality = 31.5 * cosmo_params_ps->OMb*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle * pow(theta_cmb, -4) * 1000 / (1.0 + z_equality); sound_horizon = 2.0/3.0/k_equality * sqrt(6.0/R_equality) * log( (sqrt(1+R_drag) + sqrt(R_drag+R_equality)) / (1.0 + sqrt(R_equality)) ); p_c = -(5 - sqrt(1 + 24*(1 - f_nu-f_baryon)))/4.0; p_cb = -(5 - sqrt(1 + 24*(1 - f_nu)))/4.0; f_c = 1 - f_nu - f_baryon; 
f_cb = 1 - f_nu; f_nub = f_nu+f_baryon; alpha_nu = (f_c/f_cb) * (2*(p_c+p_cb)+5)/(4*p_cb+5.0); alpha_nu *= 1 - 0.553*f_nub+0.126*pow(f_nub,3); alpha_nu /= 1-0.193*sqrt(f_nu)+0.169*f_nu; alpha_nu *= pow(1+y_d, p_c-p_cb); alpha_nu *= 1+ (p_cb-p_c)/2.0 * (1.0+1.0/(4.0*p_c+3.0)/(4.0*p_cb+7.0))/(1.0+y_d); beta_c = 1.0/(1.0-0.949*f_nub); } // Returns the value of the linear power spectrum DENSITY (i.e. <|delta_k|^2>/V) // at a given k mode linearly extrapolated to z=0 double power_in_k(double k){ double p, T, gamma, q, aa, bb, cc; // get the power spectrum.. choice of 5: if (user_params_ps->POWER_SPECTRUM == 0){ // Eisenstein & Hu T = TFmdm(k); // check if we should cuttoff power spectrum according to Bode et al. 2000 transfer function if (global_params.P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v); p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; //p = pow(k, POWER_INDEX - 0.05*log(k/0.05)) * T * T; //running, alpha=0.05 } else if (user_params_ps->POWER_SPECTRUM == 1){ // BBKS gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm)); q = k / (cosmo_params_ps->hlittle*gamma); T = (log(1.0+2.34*q)/(2.34*q)) * pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25); p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; } else if (user_params_ps->POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992) gamma = 0.25; aa = 6.4/(cosmo_params_ps->hlittle*gamma); bb = 3.0/(cosmo_params_ps->hlittle*gamma); cc = 1.7/(cosmo_params_ps->hlittle*gamma); p = pow(k, cosmo_params_ps->POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 ); } else if (user_params_ps->POWER_SPECTRUM == 3){ // Peebles, pg. 626 gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm)); aa = 8.0 / (cosmo_params_ps->hlittle*gamma); bb = 4.7 / pow(cosmo_params_ps->hlittle*gamma, 2); p = pow(k, cosmo_params_ps->POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2); } else if (user_params_ps->POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52 gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm)); aa = 1.7/(cosmo_params_ps->hlittle*gamma); bb = 9.0/pow(cosmo_params_ps->hlittle*gamma, 1.5); cc = 1.0/pow(cosmo_params_ps->hlittle*gamma, 2); p = pow(k, cosmo_params_ps->POWER_INDEX) * 19400.0 / pow(1 + aa*k + bb*pow(k, 1.5) + cc*k*k, 2); } else if (user_params_ps->POWER_SPECTRUM == 5){ // output of CLASS T = TF_CLASS(k, 1, 0); //read from z=0 output of CLASS. Note, flag_int = 1 here always, since now we have to have initialized the interpolator for CLASS p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; } else{ LOG_ERROR("No such power spectrum defined: %i. Output is bogus.", user_params_ps->POWER_SPECTRUM); Throw(ValueError); } return p*TWOPI*PI*sigma_norm*sigma_norm; } /* Returns the value of the linear power spectrum of the DM-b relative velocity at kinematic decoupling (which we set at zkin=1010) */ double power_in_vcb(double k){ double p, T, gamma, q, aa, bb, cc; //only works if using CLASS if (user_params_ps->POWER_SPECTRUM == 5){ // CLASS T = TF_CLASS(k, 1, 1); //read from CLASS file. 
flag_int=1 since we have initialized before, flag_vcb=1 for velocity p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; } else{ LOG_ERROR("Cannot get P_cb unless using CLASS: %i\n Set USE_RELATIVE_VELOCITIES 0 or use CLASS.\n", user_params_ps->POWER_SPECTRUM); Throw(ValueError); } return p*TWOPI*PI*sigma_norm*sigma_norm; } double init_ps(){ double result, error, lower_limit, upper_limit; gsl_function F; double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000); double kstart, kend; //we start the interpolator if using CLASS: if (user_params_ps->POWER_SPECTRUM == 5){ LOG_DEBUG("Setting CLASS Transfer Function inits."); TF_CLASS(1.0, 0, 0); } // Set cuttoff scale for WDM (eq. 4 in Barkana et al. 2001) in comoving Mpc R_CUTOFF = 0.201*pow((cosmo_params_ps->OMm-cosmo_params_ps->OMb)*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle/0.15, 0.15)*pow(global_params.g_x/1.5, -0.29)*pow(global_params.M_WDM, -1.15); omhh = cosmo_params_ps->OMm*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle; theta_cmb = T_cmb / 2.7; // Translate Parameters into forms GLOBALVARIABLES form f_nu = global_params.OMn/cosmo_params_ps->OMm; f_baryon = cosmo_params_ps->OMb/cosmo_params_ps->OMm; if (f_nu < TINY) f_nu = 1e-10; if (f_baryon < TINY) f_baryon = 1e-10; TFset_parameters(); sigma_norm = -1; double Radius_8; Radius_8 = 8.0/cosmo_params_ps->hlittle; if(user_params_ps->POWER_SPECTRUM == 5){ kstart = FMAX(1.0e-99/Radius_8, KBOT_CLASS); kend = FMIN(350.0/Radius_8, KTOP_CLASS); }//we establish a maximum k of KTOP_CLASS~1e3 Mpc-1 and a minimum at KBOT_CLASS,~1e-5 Mpc-1 since the CLASS transfer function has a max! else{ kstart = 1.0e-99/Radius_8; kend = 350.0/Radius_8; } lower_limit = kstart; upper_limit = kend; LOG_DEBUG("Initializing Power Spectrum with lower_limit=%e, upper_limit=%e, rel_tol=%e, radius_8=%g", lower_limit,upper_limit, rel_tol, Radius_8); F.function = &dsigma_dk; F.params = &Radius_8; gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error); gsl_integration_workspace_free (w); LOG_DEBUG("Initialized Power Spectrum."); sigma_norm = cosmo_params_ps->SIGMA_8/sqrt(result); //takes care of volume factor return R_CUTOFF; } //function to free arrays related to the power spectrum void free_ps(){ //we free the PS interpolator if using CLASS: if (user_params_ps->POWER_SPECTRUM == 5){ TF_CLASS(1.0, -1, 0); } return; } /* FUNCTION dsigmasqdm_z0(M) returns d/dm (sigma^2) (see function sigma), in units of Msun^-1 */ double dsigmasq_dm(double k, void *params){ double p, w, T, gamma, q, aa, bb, cc, dwdr, drdm, kR; // get the power spectrum.. choice of 5: if (user_params_ps->POWER_SPECTRUM == 0){ // Eisenstein & Hu ApJ, 1999, 511, 5 T = TFmdm(k); // check if we should cuttoff power spectrum according to Bode et al. 
2000 transfer function if (global_params.P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v); p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; //p = pow(k, POWER_INDEX - 0.05*log(k/0.05)) * T * T; //running, alpha=0.05 } else if (user_params_ps->POWER_SPECTRUM == 1){ // BBKS gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm)); q = k / (cosmo_params_ps->hlittle*gamma); T = (log(1.0+2.34*q)/(2.34*q)) * pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25); p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; } else if (user_params_ps->POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992) gamma = 0.25; aa = 6.4/(cosmo_params_ps->hlittle*gamma); bb = 3.0/(cosmo_params_ps->hlittle*gamma); cc = 1.7/(cosmo_params_ps->hlittle*gamma); p = pow(k, cosmo_params_ps->POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 ); } else if (user_params_ps->POWER_SPECTRUM == 3){ // Peebles, pg. 626 gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm)); aa = 8.0 / (cosmo_params_ps->hlittle*gamma); bb = 4.7 / (cosmo_params_ps->hlittle*gamma); p = pow(k, cosmo_params_ps->POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2); } else if (user_params_ps->POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52 gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm)); aa = 1.7/(cosmo_params_ps->hlittle*gamma); bb = 9.0/pow(cosmo_params_ps->hlittle*gamma, 1.5); cc = 1.0/pow(cosmo_params_ps->hlittle*gamma, 2); p = pow(k, cosmo_params_ps->POWER_INDEX) * 19400.0 / pow(1 + aa*k + pow(bb*k, 1.5) + cc*k*k, 2); } else if (user_params_ps->POWER_SPECTRUM == 5){ // JBM: CLASS T = TF_CLASS(k, 1, 0); //read from z=0 output of CLASS p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; } else{ LOG_ERROR("No such power spectrum defined: %i. Output is bogus.", user_params_ps->POWER_SPECTRUM); Throw(ValueError); } double Radius; Radius = *(double *)params; // now get the value of the window function kR = k * Radius; if (global_params.FILTER == 0){ // top hat if ( (kR) < 1.0e-4 ){ w = 1.0; }// w converges to 1 as (kR) -> 0 else { w = 3.0 * (sin(kR)/pow(kR, 3) - cos(kR)/pow(kR, 2));} // now do d(w^2)/dm = 2 w dw/dr dr/dm if ( (kR) < 1.0e-10 ){ dwdr = 0;} else{ dwdr = 9*cos(kR)*k/pow(kR,3) + 3*sin(kR)*(1 - 3/(kR*kR))/(kR*Radius);} //3*k*( 3*cos(kR)/pow(kR,3) + sin(kR)*(-3*pow(kR, -4) + 1/(kR*kR)) );} // dwdr = -1e8 * k / (R*1e3); drdm = 1.0 / (4.0*PI * cosmo_params_ps->OMm*RHOcrit * Radius*Radius); } else if (global_params.FILTER == 1){ // gaussian of width 1/R w = pow(E, -kR*kR/2.0); dwdr = - k*kR * w; drdm = 1.0 / (pow(2*PI, 1.5) * cosmo_params_ps->OMm*RHOcrit * 3*Radius*Radius); } else { LOG_ERROR("No such filter: %i. 
Output is bogus.", global_params.FILTER); Throw(ValueError); } // return k*k*p*2*w*dwdr*drdm * d2fact; return k*k*p*2*w*dwdr*drdm; } double dsigmasqdm_z0(double M){ double result, error, lower_limit, upper_limit; gsl_function F; double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000); double kstart, kend; double Radius; // R = MtoR(M); Radius = MtoR(M); // now lets do the integral for sigma and scale it with sigma_norm if(user_params_ps->POWER_SPECTRUM == 5){ kstart = FMAX(1.0e-99/Radius, KBOT_CLASS); kend = FMIN(350.0/Radius, KTOP_CLASS); }//we establish a maximum k of KTOP_CLASS~1e3 Mpc-1 and a minimum at KBOT_CLASS,~1e-5 Mpc-1 since the CLASS transfer function has a max! else{ kstart = 1.0e-99/Radius; kend = 350.0/Radius; } lower_limit = kstart;//log(kstart); upper_limit = kend;//log(kend); if (user_params_ps->POWER_SPECTRUM == 5){ // for CLASS we do not need to renormalize the sigma integral. d2fact=1.0; } else { d2fact = M*10000/sigma_z0(M); } F.function = &dsigmasq_dm; F.params = &Radius; gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS61, w, &result, &error); // gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS15, w, &result, &error); gsl_integration_workspace_free (w); // return sigma_norm * sigma_norm * result /d2fact; return sigma_norm * sigma_norm * result; } /* sheth correction to delta crit */ double sheth_delc(double del, double sig){ return sqrt(SHETH_a)*del*(1. + global_params.SHETH_b*pow(sig*sig/(SHETH_a*del*del), global_params.SHETH_c)); } /* FUNCTION dNdM_st(z, M) Computes the Press_schechter mass function with Sheth-Torman correction for ellipsoidal collapse at redshift z, and dark matter halo mass M (in solar masses). Uses interpolated sigma and dsigmadm to be computed faster. Necessary for mass-dependent ionising efficiencies. The return value is the number density per unit mass of halos in the mass range M to M+dM in units of: comoving Mpc^-3 Msun^-1 Reference: Sheth, Mo, Torman 2001 */ double dNdM_st(double growthf, double M){ double sigma, dsigmadm, nuhat; float MassBinLow; int MassBin; if(user_params_ps->USE_INTERPOLATION_TABLES) { MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = -pow(10.,dsigmadm); } else { sigma = sigma_z0(M); dsigmadm = dsigmasqdm_z0(M); } sigma = sigma * growthf; dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma)); nuhat = sqrt(SHETH_a) * Deltac / sigma; return (-(cosmo_params_ps->OMm)*RHOcrit/M) * (dsigmadm/sigma) * sqrt(2./PI)*SHETH_A * (1+ pow(nuhat, -2*SHETH_p)) * nuhat * pow(E, -nuhat*nuhat/2.0); } /* FUNCTION dNdM_WatsonFOF(z, M) Computes the Press_schechter mass function with Warren et al. 2011 correction for ellipsoidal collapse at redshift z, and dark matter halo mass M (in solar masses). The Universial FOF function (Eq. 12) of Watson et al. 2013 The return value is the number density per unit mass of halos in the mass range M to M+dM in units of: comoving Mpc^-3 Msun^-1 Reference: Watson et al. 
2013 */ double dNdM_WatsonFOF(double growthf, double M){ double sigma, dsigmadm, f_sigma; float MassBinLow; int MassBin; if(user_params_ps->USE_INTERPOLATION_TABLES) { MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = -pow(10.,dsigmadm); } else { sigma = sigma_z0(M); dsigmadm = dsigmasqdm_z0(M); } sigma = sigma * growthf; dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma)); f_sigma = Watson_A * ( pow( Watson_beta/sigma, Watson_alpha) + 1. ) * exp( - Watson_gamma/(sigma*sigma) ); return (-(cosmo_params_ps->OMm)*RHOcrit/M) * (dsigmadm/sigma) * f_sigma; } /* FUNCTION dNdM_WatsonFOF_z(z, M) Computes the Press_schechter mass function with Warren et al. 2011 correction for ellipsoidal collapse at redshift z, and dark matter halo mass M (in solar masses). The Universial FOF function, with redshift evolution (Eq. 12 - 15) of Watson et al. 2013. The return value is the number density per unit mass of halos in the mass range M to M+dM in units of: comoving Mpc^-3 Msun^-1 Reference: Watson et al. 2013 */ double dNdM_WatsonFOF_z(double z, double growthf, double M){ double sigma, dsigmadm, A_z, alpha_z, beta_z, Omega_m_z, f_sigma; float MassBinLow; int MassBin; if(user_params_ps->USE_INTERPOLATION_TABLES) { MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = -pow(10.,dsigmadm); } else { sigma = sigma_z0(M); dsigmadm = dsigmasqdm_z0(M); } sigma = sigma * growthf; dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma)); Omega_m_z = (cosmo_params_ps->OMm)*pow(1.+z,3.) / ( (cosmo_params_ps->OMl) + (cosmo_params_ps->OMm)*pow(1.+z,3.) + (global_params.OMr)*pow(1.+z,4.) ); A_z = Omega_m_z * ( Watson_A_z_1 * pow(1. + z, Watson_A_z_2 ) + Watson_A_z_3 ); alpha_z = Omega_m_z * ( Watson_alpha_z_1 * pow(1.+z, Watson_alpha_z_2 ) + Watson_alpha_z_3 ); beta_z = Omega_m_z * ( Watson_beta_z_1 * pow(1.+z, Watson_beta_z_2 ) + Watson_beta_z_3 ); f_sigma = A_z * ( pow(beta_z/sigma, alpha_z) + 1. ) * exp( - Watson_gamma_z/(sigma*sigma) ); return (-(cosmo_params_ps->OMm)*RHOcrit/M) * (dsigmadm/sigma) * f_sigma; } /* FUNCTION dNdM(growthf, M) Computes the Press_schechter mass function at redshift z (using the growth factor), and dark matter halo mass M (in solar masses). Uses interpolated sigma and dsigmadm to be computed faster. Necessary for mass-dependent ionising efficiencies. The return value is the number density per unit mass of halos in the mass range M to M+dM in units of: comoving Mpc^-3 Msun^-1 Reference: Padmanabhan, pg. 
214 */ double dNdM(double growthf, double M){ double sigma, dsigmadm; float MassBinLow; int MassBin; if(user_params_ps->USE_INTERPOLATION_TABLES) { MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = -pow(10.,dsigmadm); } else { sigma = sigma_z0(M); dsigmadm = dsigmasqdm_z0(M); } sigma = sigma * growthf; dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma)); return (-(cosmo_params_ps->OMm)*RHOcrit/M) * sqrt(2/PI) * (Deltac/(sigma*sigma)) * dsigmadm * pow(E, -(Deltac*Deltac)/(2*sigma*sigma)); } /* FUNCTION FgtrM(z, M) Computes the fraction of mass contained in haloes with mass > M at redshift z */ double FgtrM(double z, double M){ double del, sig; del = Deltac/dicke(z); //regular spherical collapse delta sig = sigma_z0(M); return splined_erfc(del / (sqrt(2)*sig)); } /* FUNCTION FgtrM_wsigma(z, sigma_z0(M)) Computes the fraction of mass contained in haloes with mass > M at redshift z. Requires sigma_z0(M) rather than M to make certain heating integrals faster */ double FgtrM_wsigma(double z, double sig){ double del; del = Deltac/dicke(z); //regular spherical collapse delta return splined_erfc(del / (sqrt(2)*sig)); } /* FUNCTION FgtrM_Watson(z, M) Computes the fraction of mass contained in haloes with mass > M at redshift z Uses Watson et al (2013) correction */ double dFdlnM_Watson_z (double lnM, void *params){ struct parameters_gsl_FgtrM_int_ vals = *(struct parameters_gsl_FgtrM_int_ *)params; double M = exp(lnM); double z = vals.z_obs; double growthf = vals.gf_obs; return dNdM_WatsonFOF_z(z, growthf, M) * M * M; } double FgtrM_Watson_z(double z, double growthf, double M){ double result, error, lower_limit, upper_limit; gsl_function F; double rel_tol = 0.001; //<- relative tolerance gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000); F.function = &dFdlnM_Watson_z; struct parameters_gsl_FgtrM_int_ parameters_gsl_FgtrM = { .z_obs = z, .gf_obs = growthf, }; F.params = &parameters_gsl_FgtrM; lower_limit = log(M); upper_limit = log(FMAX(global_params.M_MAX_INTEGRAL, M*100)); gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error); gsl_integration_workspace_free (w); return result / (cosmo_params_ps->OMm*RHOcrit); } /* FUNCTION FgtrM_Watson(z, M) Computes the fraction of mass contained in haloes with mass > M at redshift z Uses Watson et al (2013) correction */ double dFdlnM_Watson (double lnM, void *params){ double growthf = *(double *)params; double M = exp(lnM); return dNdM_WatsonFOF(growthf, M) * M * M; } double FgtrM_Watson(double growthf, double M){ double result, error, lower_limit, upper_limit; gsl_function F; double rel_tol = 0.001; //<- relative tolerance gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000); F.function = &dFdlnM_Watson; F.params = &growthf; lower_limit = log(M); upper_limit = log(FMAX(global_params.M_MAX_INTEGRAL, M*100)); gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error); gsl_integration_workspace_free (w); return result / (cosmo_params_ps->OMm*RHOcrit); } double dFdlnM_General(double lnM, void *params){ struct parameters_gsl_FgtrM_int_ vals = *(struct 
parameters_gsl_FgtrM_int_ *)params; double M = exp(lnM); double z = vals.z_obs; double growthf = vals.gf_obs; double MassFunction; if(user_params_ps->HMF==0) { MassFunction = dNdM(growthf, M); } if(user_params_ps->HMF==1) { MassFunction = dNdM_st(growthf, M); } if(user_params_ps->HMF==2) { MassFunction = dNdM_WatsonFOF(growthf, M); } if(user_params_ps->HMF==3) { MassFunction = dNdM_WatsonFOF_z(z, growthf, M); } return MassFunction * M * M; } /* FUNCTION FgtrM_General(z, M) Computes the fraction of mass contained in haloes with mass > M at redshift z */ double FgtrM_General(double z, double M){ double del, sig, growthf; int status; growthf = dicke(z); struct parameters_gsl_FgtrM_int_ parameters_gsl_FgtrM = { .z_obs = z, .gf_obs = growthf, }; if(user_params_ps->HMF<4 && user_params_ps->HMF>-1) { double result, error, lower_limit, upper_limit; gsl_function F; double rel_tol = 0.001; //<- relative tolerance gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000); F.function = &dFdlnM_General; F.params = &parameters_gsl_FgtrM; lower_limit = log(M); upper_limit = log(FMAX(global_params.M_MAX_INTEGRAL, M*100)); gsl_set_error_handler_off(); status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error); if(status!=0) { LOG_ERROR("gsl integration error occured!"); LOG_ERROR("lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error); LOG_ERROR("data: z=%e growthf=%e M=%e",z,growthf,M); Throw(GSLError); } gsl_integration_workspace_free (w); return result / (cosmo_params_ps->OMm*RHOcrit); } else { LOG_ERROR("Incorrect HMF selected: %i (should be between 0 and 3).", user_params_ps->HMF); Throw(ValueError); } } double dNion_General(double lnM, void *params){ struct parameters_gsl_SFR_General_int_ vals = *(struct parameters_gsl_SFR_General_int_ *)params; double M = exp(lnM); double z = vals.z_obs; double growthf = vals.gf_obs; double MassTurnover = vals.Mdrop; double Alpha_star = vals.pl_star; double Alpha_esc = vals.pl_esc; double Fstar10 = vals.frac_star; double Fesc10 = vals.frac_esc; double Mlim_Fstar = vals.LimitMass_Fstar; double Mlim_Fesc = vals.LimitMass_Fesc; double Fstar, Fesc, MassFunction; if (Alpha_star > 0. && M > Mlim_Fstar) Fstar = 1./Fstar10; else if (Alpha_star < 0. && M < Mlim_Fstar) Fstar = 1/Fstar10; else Fstar = pow(M/1e10,Alpha_star); if (Alpha_esc > 0. && M > Mlim_Fesc) Fesc = 1./Fesc10; else if (Alpha_esc < 0. 
&& M < Mlim_Fesc) Fesc = 1./Fesc10; else Fesc = pow(M/1e10,Alpha_esc); if(user_params_ps->HMF==0) { MassFunction = dNdM(growthf, M); } if(user_params_ps->HMF==1) { MassFunction = dNdM_st(growthf,M); } if(user_params_ps->HMF==2) { MassFunction = dNdM_WatsonFOF(growthf, M); } if(user_params_ps->HMF==3) { MassFunction = dNdM_WatsonFOF_z(z, growthf, M); } return MassFunction * M * M * exp(-MassTurnover/M) * Fstar * Fesc; } double Nion_General(double z, double M_Min, double MassTurnover, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc){ double growthf; growthf = dicke(z); double result, error, lower_limit, upper_limit; gsl_function F; double rel_tol = 0.001; //<- relative tolerance gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000); struct parameters_gsl_SFR_General_int_ parameters_gsl_SFR = { .z_obs = z, .gf_obs = growthf, .Mdrop = MassTurnover, .pl_star = Alpha_star, .pl_esc = Alpha_esc, .frac_star = Fstar10, .frac_esc = Fesc10, .LimitMass_Fstar = Mlim_Fstar, .LimitMass_Fesc = Mlim_Fesc, }; int status; if(user_params_ps->HMF<4 && user_params_ps->HMF>-1) { F.function = &dNion_General; F.params = &parameters_gsl_SFR; lower_limit = log(M_Min); upper_limit = log(FMAX(global_params.M_MAX_INTEGRAL, M_Min*100)); gsl_set_error_handler_off(); status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error); if(status!=0) { LOG_ERROR("(function argument): %e %e %e %e %e\n",lower_limit,upper_limit,rel_tol,result,error); LOG_ERROR("data: %e %e %e %e %e %e %e %e %e\n",z,growthf,MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc); Throw GSLError; } gsl_integration_workspace_free (w); return result / ((cosmo_params_ps->OMm)*RHOcrit); } else { LOG_ERROR("Incorrect HMF selected: %i (should be between 0 and 3).", user_params_ps->HMF); Throw ValueError; } } double dNion_General_MINI(double lnM, void *params){ struct parameters_gsl_SFR_General_int_ vals = *(struct parameters_gsl_SFR_General_int_ *)params; double M = exp(lnM); double z = vals.z_obs; double growthf = vals.gf_obs; double MassTurnover = vals.Mdrop; double MassTurnover_upper = vals.Mdrop_upper; double Alpha_star = vals.pl_star; double Alpha_esc = vals.pl_esc; double Fstar7_MINI = vals.frac_star; double Fesc7_MINI = vals.frac_esc; double Mlim_Fstar = vals.LimitMass_Fstar; double Mlim_Fesc = vals.LimitMass_Fesc; double Fstar, Fesc, MassFunction; if (Alpha_star > 0. && M > Mlim_Fstar) Fstar = 1./Fstar7_MINI; else if (Alpha_star < 0. && M < Mlim_Fstar) Fstar = 1/Fstar7_MINI; else Fstar = pow(M/1e7,Alpha_star); if (Alpha_esc > 0. && M > Mlim_Fesc) Fesc = 1./Fesc7_MINI; else if (Alpha_esc < 0. 
&& M < Mlim_Fesc) Fesc = 1./Fesc7_MINI; else Fesc = pow(M/1e7,Alpha_esc); if(user_params_ps->HMF==0) { MassFunction = dNdM(growthf, M); } if(user_params_ps->HMF==1) { MassFunction = dNdM_st(growthf,M); } if(user_params_ps->HMF==2) { MassFunction = dNdM_WatsonFOF(growthf, M); } if(user_params_ps->HMF==3) { MassFunction = dNdM_WatsonFOF_z(z, growthf, M); } return MassFunction * M * M * exp(-MassTurnover/M) * exp(-M/MassTurnover_upper) * Fstar * Fesc; } double Nion_General_MINI(double z, double M_Min, double MassTurnover, double MassTurnover_upper, double Alpha_star, double Alpha_esc, double Fstar7_MINI, double Fesc7_MINI, double Mlim_Fstar, double Mlim_Fesc){ double growthf; growthf = dicke(z); double result, error, lower_limit, upper_limit; gsl_function F; double rel_tol = 0.001; //<- relative tolerance gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000); struct parameters_gsl_SFR_General_int_ parameters_gsl_SFR = { .z_obs = z, .gf_obs = growthf, .Mdrop = MassTurnover, .Mdrop_upper = MassTurnover_upper, .pl_star = Alpha_star, .pl_esc = Alpha_esc, .frac_star = Fstar7_MINI, .frac_esc = Fesc7_MINI, .LimitMass_Fstar = Mlim_Fstar, .LimitMass_Fesc = Mlim_Fesc, }; if(user_params_ps->HMF<4 && user_params_ps->HMF>-1) { F.function = &dNion_General_MINI; F.params = &parameters_gsl_SFR; lower_limit = log(M_Min); upper_limit = log(FMAX(global_params.M_MAX_INTEGRAL, M_Min*100)); gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error); gsl_integration_workspace_free (w); return result / ((cosmo_params_ps->OMm)*RHOcrit); } else { LOG_ERROR("Incorrect HMF selected: %i (should be between 0 and 3).", user_params_ps->HMF); Throw(ValueError); } } /* returns the "effective Jeans mass" in Msun corresponding to the gas analog of WDM ; eq. 10 in Barkana+ 2001 */ double M_J_WDM(){ double z_eq, fudge=60; if (!(global_params.P_CUTOFF)) return 0; z_eq = 3600*(cosmo_params_ps->OMm-cosmo_params_ps->OMb)*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle/0.15; return fudge*3.06e8 * (1.5/global_params.g_x) * sqrt((cosmo_params_ps->OMm-cosmo_params_ps->OMb)*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle/0.15) * pow(global_params.M_WDM, -4) * pow(z_eq/3000.0, 1.5); } float erfcc(float x) { double t,q,ans; q=fabs(x); t=1.0/(1.0+0.5*q); ans=t*exp(-q*q-1.2655122+t*(1.0000237+t*(0.374092+t*(0.0967842+ t*(-0.1862881+t*(0.2788681+t*(-1.13520398+t*(1.4885159+ t*(-0.82215223+t*0.17087277))))))))); return x >= 0.0 ? ans : 2.0-ans; } double splined_erfc(double x){ if (x < 0){ return 1.0; } // TODO: This could be wrapped in a Try/Catch to try the fast way and if it doesn't // work, use the slow way. return erfcc(x); // the interpolation below doesn't seem to be stable in Ts.c if (x > ERFC_PARAM_DELTA*(ERFC_NPTS-1)) return erfcc(x); else return exp(gsl_spline_eval(erfc_spline, x, erfc_acc)); } void gauleg(float x1, float x2, float x[], float w[], int n) //Given the lower and upper limits of integration x1 and x2, and given n, this routine returns arrays x[1..n] and w[1..n] of length n, //containing the abscissas and weights of the Gauss- Legendre n-point quadrature formula. { int m,j,i; double z1,z,xm,xl,pp,p3,p2,p1; m=(n+1)/2; xm=0.5*(x2+x1); xl=0.5*(x2-x1); for (i=1;i<=m;i++) { //High precision is a good idea for this routine. //The roots are symmetric in the interval, so we only have to find half of them. //Loop over the desired roots. 
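        /* For reference, a sketch of the standard Gauss-Legendre relations used below
           (as in Numerical Recipes; this is documentation only, not extra code):
             initial guess for the i-th root:  z ~ cos( PI*(i - 0.25)/(n + 0.5) )
             Legendre recurrence:              j*P_j(z) = (2j-1)*z*P_{j-1}(z) - (j-1)*P_{j-2}(z)
             derivative:                       P_n'(z) = n*( z*P_n(z) - P_{n-1}(z) )/(z*z - 1)
             Newton update:                    z <- z - P_n(z)/P_n'(z)
             weights on [-1,1]:                w_i = 2/( (1 - z_i*z_i) * P_n'(z_i)^2 )
           which are then mapped to [x1,x2] via x = xm -/+ xl*z and w -> xl*w. */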
z=cos(3.141592654*(i-0.25)/(n+0.5)); //Starting with the above approximation to the ith root, we enter the main loop of refinement by Newton’s method. do { p1=1.0; p2=0.0; for (j=1;j<=n;j++) { //Loop up the recurrence relation to get the Legendre polynomial evaluated at z. p3=p2; p2=p1; p1=((2.0*j-1.0)*z*p2-(j-1.0)*p3)/j; } //p1 is now the desired Legendre polynomial. We next compute pp, its derivative, by a standard relation involving also p2, //the polynomial of one lower order. pp=n*(z*p1-p2)/(z*z-1.0); z1=z; z=z1-p1/pp; } while (fabs(z-z1) > EPS2); x[i]=xm-xl*z; x[n+1-i]=xm+xl*z; w[i]=2.0*xl/((1.0-z*z)*pp*pp); w[n+1-i]=w[i]; } } void initialiseSigmaMInterpTable(float M_Min, float M_Max) { int i; float Mass; if (Mass_InterpTable == NULL){ Mass_InterpTable = calloc(NMass,sizeof(float)); Sigma_InterpTable = calloc(NMass,sizeof(float)); dSigmadm_InterpTable = calloc(NMass,sizeof(float)); } #pragma omp parallel shared(Mass_InterpTable,Sigma_InterpTable,dSigmadm_InterpTable) private(i) num_threads(user_params_ps->N_THREADS) { #pragma omp for for(i=0;i<NMass;i++) { Mass_InterpTable[i] = log(M_Min) + (float)i/(NMass-1)*( log(M_Max) - log(M_Min) ); Sigma_InterpTable[i] = sigma_z0(exp(Mass_InterpTable[i])); dSigmadm_InterpTable[i] = log10(-dsigmasqdm_z0(exp(Mass_InterpTable[i]))); } } for(i=0;i<NMass;i++) { if(isfinite(Mass_InterpTable[i]) == 0 || isfinite(Sigma_InterpTable[i]) == 0 || isfinite(dSigmadm_InterpTable[i])==0) { LOG_ERROR("Detected either an infinite or NaN value in initialiseSigmaMInterpTable"); Throw(ParameterError); } } MinMass = log(M_Min); mass_bin_width = 1./(NMass-1)*( log(M_Max) - log(M_Min) ); inv_mass_bin_width = 1./mass_bin_width; } void freeSigmaMInterpTable() { free(Mass_InterpTable); free(Sigma_InterpTable); free(dSigmadm_InterpTable); Mass_InterpTable = NULL; } void nrerror(char error_text[]) { LOG_ERROR("Numerical Recipes run-time error..."); LOG_ERROR("%s",error_text); Throw(MemoryAllocError); } float *vector(long nl, long nh) /* allocate a float vector with subscript range v[nl..nh] */ { float *v; v = (float *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(float))); if(!v) nrerror("allocation failure in vector()"); return v - nl + NR_END; } void free_vector(float *v, long nl, long nh) /* free a float vector allocated with vector() */ { free((FREE_ARG) (v+nl-NR_END)); } void spline(float x[], float y[], int n, float yp1, float ypn, float y2[]) /*Given arrays x[1..n] and y[1..n] containing a tabulated function, i.e., yi = f(xi), with x1 <x2 < :: : < xN, and given values yp1 and ypn for the first derivative of the interpolating function at points 1 and n, respectively, this routine returns an array y2[1..n] that contains the second derivatives of the interpolating function at the tabulated points xi. If yp1 and/or ypn are equal to 1e30 or larger, the routine is signaled to set the corresponding boundary condition for a natural spline, with zero second derivative on that boundary.*/ { int i,k; float p,qn,sig,un,*u; int na,nb,check; u=vector(1,n-1); if (yp1 > 0.99e30) // The lower boundary condition is set either to be "natural" y2[1]=u[1]=0.0; else { // or else to have a specified first derivative. y2[1] = -0.5; u[1]=(3.0/(x[2]-x[1]))*((y[2]-y[1])/(x[2]-x[1])-yp1); } for (i=2;i<=n-1;i++) { //This is the decomposition loop of the tridiagonal algorithm. 
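        /* For reference, the relation decomposed in this loop is the standard cubic-spline
           continuity condition on the second derivatives y''_i (sketch only; cf. Numerical
           Recipes Sec. 3.3):
             (x_i - x_{i-1})/6 * y''_{i-1} + (x_{i+1} - x_{i-1})/3 * y''_i + (x_{i+1} - x_i)/6 * y''_{i+1}
                 = (y_{i+1} - y_i)/(x_{i+1} - x_i) - (y_i - y_{i-1})/(x_i - x_{i-1})
           solved by forward elimination here and back-substitution in the loop at the end. */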
sig=(x[i]-x[i-1])/(x[i+1]-x[i-1]); //y2 and u are used for temporary na = 1; nb = 1; check = 0; while(((float)(x[i+na*1]-x[i-nb*1])==(float)0.0)) { check = check + 1; if(check%2==0) { na = na + 1; } else { nb = nb + 1; } sig=(x[i]-x[i-1])/(x[i+na*1]-x[i-nb*1]); } p=sig*y2[i-1]+2.0; //storage of the decomposed y2[i]=(sig-1.0)/p; // factors. u[i]=(y[i+1]-y[i])/(x[i+1]-x[i]) - (y[i]-y[i-1])/(x[i]-x[i-1]); u[i]=(6.0*u[i]/(x[i+1]-x[i-1])-sig*u[i-1])/p; if(((float)(x[i+1]-x[i])==(float)0.0) || ((float)(x[i]-x[i-1])==(float)0.0)) { na = 0; nb = 0; check = 0; while((float)(x[i+na*1]-x[i-nb])==(float)(0.0) || ((float)(x[i+na]-x[i-nb*1])==(float)0.0)) { check = check + 1; if(check%2==0) { na = na + 1; } else { nb = nb + 1; } } u[i]=(y[i+1]-y[i])/(x[i+na*1]-x[i-nb]) - (y[i]-y[i-1])/(x[i+na]-x[i-nb*1]); u[i]=(6.0*u[i]/(x[i+na*1]-x[i-nb*1])-sig*u[i-1])/p; } } if (ypn > 0.99e30) //The upper boundary condition is set either to be "natural" qn=un=0.0; else { //or else to have a specified first derivative. qn=0.5; un=(3.0/(x[n]-x[n-1]))*(ypn-(y[n]-y[n-1])/(x[n]-x[n-1])); } y2[n]=(un-qn*u[n-1])/(qn*y2[n-1]+1.0); for (k=n-1;k>=1;k--) { //This is the backsubstitution loop of the tridiagonal y2[k]=y2[k]*y2[k+1]+u[k]; //algorithm. } free_vector(u,1,n-1); } void splint(float xa[], float ya[], float y2a[], int n, float x, float *y) /*Given the arrays xa[1..n] and ya[1..n], which tabulate a function (with the xai's in order), and given the array y2a[1..n], which is the output from spline above, and given a value of x, this routine returns a cubic-spline interpolated value y.*/ { void nrerror(char error_text[]); int klo,khi,k; float h,b,a; klo=1; // We will find the right place in the table by means of khi=n; //bisection. This is optimal if sequential calls to this while (khi-klo > 1) { //routine are at random values of x. If sequential calls k=(khi+klo) >> 1; //are in order, and closely spaced, one would do better if (xa[k] > x) khi=k; //to store previous values of klo and khi and test if else klo=k; //they remain appropriate on the next call. } // klo and khi now bracket the input value of x. h=xa[khi]-xa[klo]; if (h == 0.0) nrerror("Bad xa input to routine splint"); //The xa's must be distinct. a=(xa[khi]-x)/h; b=(x-xa[klo])/h; //Cubic spline polynomial is now evaluated. 
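    /* For reference, the interpolant evaluated on the next line (standard cubic-spline form):
         y = a*y_lo + b*y_hi + [ (a^3 - a)*y2_lo + (b^3 - b)*y2_hi ] * h^2 / 6
       with h = x_hi - x_lo, a = (x_hi - x)/h, b = (x - x_lo)/h, and y2 the second
       derivatives returned by spline() above. */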
*y=a*ya[klo]+b*ya[khi]+((a*a*a-a)*y2a[klo]+(b*b*b-b)*y2a[khi])*(h*h)/6.0; } unsigned long *lvector(long nl, long nh) /* allocate an unsigned long vector with subscript range v[nl..nh] */ { unsigned long *v; v = (unsigned long *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(long))); if(!v) nrerror("allocation failure in lvector()"); return v - nl + NR_END; } void free_lvector(unsigned long *v, long nl, long nh) /* free an unsigned long vector allocated with lvector() */ { free((FREE_ARG) (v+nl-NR_END)); } /* dnbiasdM */ double dnbiasdM(double M, float z, double M_o, float del_o){ double sigsq, del, sig_one, sig_o; if ((M_o-M) < TINY){ LOG_ERROR("In function dnbiasdM: M must be less than M_o!\nAborting...\n"); Throw(ValueError); } del = Deltac/dicke(z) - del_o; if (del < 0){ LOG_ERROR(" In function dnbiasdM: del_o must be less than del_1 = del_crit/dicke(z)!\nAborting...\n"); Throw(ValueError); } sig_o = sigma_z0(M_o); sig_one = sigma_z0(M); sigsq = sig_one*sig_one - sig_o*sig_o; return -(RHOcrit*cosmo_params_ps->OMm)/M /sqrt(2*PI) *del*pow(sigsq,-1.5)*pow(E, -0.5*del*del/sigsq)*dsigmasqdm_z0(M); } /* calculates the fraction of mass contained in haloes with mass > M at redshift z, in regions with a linear overdensity of del_bias, and standard deviation sig_bias */ double FgtrM_bias(double z, double M, double del_bias, double sig_bias){ double del, sig, sigsmallR; sigsmallR = sigma_z0(M); if (!(sig_bias < sigsmallR)){ // biased region is smaller that halo! // fprintf(stderr, "FgtrM_bias: Biased region is smaller than halo!\nResult is bogus.\n"); // return 0; return 0.000001; } del = Deltac/dicke(z) - del_bias; sig = sqrt(sigsmallR*sigsmallR - sig_bias*sig_bias); return splined_erfc(del / (sqrt(2)*sig)); } /* Uses sigma parameters instead of Mass for scale */ double sigmaparam_FgtrM_bias(float z, float sigsmallR, float del_bias, float sig_bias){ double del, sig; if (!(sig_bias < sigsmallR)){ // biased region is smaller that halo! // fprintf(stderr, "local_FgtrM_bias: Biased region is smaller than halo!\nResult is bogus.\n"); // return 0; return 0.000001; } del = Deltac/dicke(z) - del_bias; sig = sqrt(sigsmallR*sigsmallR - sig_bias*sig_bias); return splined_erfc(del / (sqrt(2)*sig)); } /* redshift derivative of the growth function at z */ double ddicke_dz(double z){ float dz = 1e-10; double omegaM_z, ddickdz, dick_0, x, x_0, domegaMdz; return (dicke(z+dz)-dicke(z))/dz; } /* compute a mass limit where the stellar baryon fraction and the escape fraction exceed unity */ float Mass_limit (float logM, float PL, float FRAC) { return FRAC*pow(pow(10.,logM)/1e10,PL); } void bisection(float *x, float xlow, float xup, int *iter){ *x=(xlow + xup)/2.; ++(*iter); } float Mass_limit_bisection(float Mmin, float Mmax, float PL, float FRAC){ int i, iter, max_iter=200; float rel_tol=0.001; float logMlow, logMupper, x, x1; iter = 0; logMlow = log10(Mmin); logMupper = log10(Mmax); if (PL < 0.) { if (Mass_limit(logMlow,PL,FRAC) <= 1.) { return Mmin; } } else if (PL > 0.) { if (Mass_limit(logMupper,PL,FRAC) <= 1.) { return Mmax; } } else return 0; bisection(&x, logMlow, logMupper, &iter); do { if((Mass_limit(logMlow,PL,FRAC)-1.)*(Mass_limit(x,PL,FRAC)-1.) < 0.) logMupper = x; else logMlow = x; bisection(&x1, logMlow, logMupper, &iter); if(fabs(x1-x) < rel_tol) { return pow(10.,x1); } x = x1; } while(iter < max_iter); // Got to max_iter without finding a solution. 
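    /* Bisection did not converge. As a cross-check when diagnosing failures (not used by the
       code): for the pure power law in Mass_limit(), FRAC*(M/1e10)^PL = 1 has the closed-form
       solution M = 1e10 * FRAC^(-1/PL) for PL != 0, which can be compared against [Mmin, Mmax]. */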
LOG_ERROR("Failed to find a mass limit to regulate stellar fraction/escape fraction is between 0 and 1."); LOG_ERROR(" The solution does not converge or iterations are not sufficient."); Throw(ParameterError); return(0.0); } int initialise_ComputeLF(int nbins, struct UserParams *user_params, struct CosmoParams *cosmo_params, struct AstroParams *astro_params, struct FlagOptions *flag_options) { Broadcast_struct_global_PS(user_params,cosmo_params); Broadcast_struct_global_UF(user_params,cosmo_params); lnMhalo_param = calloc(nbins,sizeof(double)); Muv_param = calloc(nbins,sizeof(double)); Mhalo_param = calloc(nbins,sizeof(double)); LF_spline_acc = gsl_interp_accel_alloc(); LF_spline = gsl_spline_alloc(gsl_interp_cspline, nbins); init_ps(); int status; Try initialiseSigmaMInterpTable(0.999*Mhalo_min,1.001*Mhalo_max); Catch(status) { LOG_ERROR("\t...called from initialise_ComputeLF"); return(status); } initialised_ComputeLF = true; return(0); } void cleanup_ComputeLF(){ free(lnMhalo_param); free(Muv_param); free(Mhalo_param); gsl_spline_free (LF_spline); gsl_interp_accel_free(LF_spline_acc); freeSigmaMInterpTable(); initialised_ComputeLF = 0; } int ComputeLF(int nbins, struct UserParams *user_params, struct CosmoParams *cosmo_params, struct AstroParams *astro_params, struct FlagOptions *flag_options, int component, int NUM_OF_REDSHIFT_FOR_LF, float *z_LF, float *M_TURNs, double *M_uv_z, double *M_h_z, double *log10phi) { /* This is an API-level function and thus returns an int status. */ int status; Try{ // This try block covers the whole function. // This NEEDS to be done every time, because the actual object passed in as // user_params, cosmo_params etc. can change on each call, freeing up the memory. initialise_ComputeLF(nbins, user_params,cosmo_params,astro_params,flag_options); int i,i_z; int i_unity, i_smth, mf, nbins_smth=7; double dlnMhalo, lnMhalo_i, SFRparam, Muv_1, Muv_2, dMuvdMhalo; double Mhalo_i, lnMhalo_min, lnMhalo_max, lnMhalo_lo, lnMhalo_hi, dlnM, growthf; double f_duty_upper, Mcrit_atom; float Fstar, Fstar_temp; double dndm; int gsl_status; gsl_set_error_handler_off(); if (astro_params->ALPHA_STAR < -0.5) LOG_WARNING( "ALPHA_STAR is %f, which is unphysical value given the observational LFs.\n"\ "Also, when ALPHA_STAR < -.5, LFs may show a kink. It is recommended to set ALPHA_STAR > -0.5.", astro_params->ALPHA_STAR ); mf = user_params_ps->HMF; lnMhalo_min = log(Mhalo_min*0.999); lnMhalo_max = log(Mhalo_max*1.001); dlnMhalo = (lnMhalo_max - lnMhalo_min)/(double)(nbins - 1); for (i_z=0; i_z<NUM_OF_REDSHIFT_FOR_LF; i_z++) { growthf = dicke(z_LF[i_z]); Mcrit_atom = atomic_cooling_threshold(z_LF[i_z]); i_unity = -1; for (i=0; i<nbins; i++) { // generate interpolation arrays lnMhalo_param[i] = lnMhalo_min + dlnMhalo*(double)i; Mhalo_i = exp(lnMhalo_param[i]); if (component == 1) Fstar = astro_params->F_STAR10*pow(Mhalo_i/1e10,astro_params->ALPHA_STAR); else Fstar = astro_params->F_STAR7_MINI*pow(Mhalo_i/1e7,astro_params->ALPHA_STAR); if (Fstar > 1.) Fstar = 1; if (i_unity < 0) { // Find the array number at which Fstar crosses unity. if (astro_params->ALPHA_STAR > 0.) { if ( (1.- Fstar) < FRACT_FLOAT_ERR ) i_unity = i; } else if (astro_params->ALPHA_STAR < 0. && i < nbins-1) { if (component == 1) Fstar_temp = astro_params->F_STAR10*pow( exp(lnMhalo_min + dlnMhalo*(double)(i+1))/1e10,astro_params->ALPHA_STAR); else Fstar_temp = astro_params->F_STAR7_MINI*pow( exp(lnMhalo_min + dlnMhalo*(double)(i+1))/1e7,astro_params->ALPHA_STAR); if (Fstar_temp < 1. 
&& (1.- Fstar) < FRACT_FLOAT_ERR) i_unity = i; } } // parametrization of SFR SFRparam = Mhalo_i * cosmo_params->OMb/cosmo_params->OMm * (double)Fstar * (double)(hubble(z_LF[i_z])*SperYR/astro_params->t_STAR); // units of M_solar/year Muv_param[i] = 51.63 - 2.5*log10(SFRparam*Luv_over_SFR); // UV magnitude // except if Muv value is nan or inf, but avoid error put the value as 10. if ( isinf(Muv_param[i]) || isnan(Muv_param[i]) ) Muv_param[i] = 10.; M_uv_z[i + i_z*nbins] = Muv_param[i]; } gsl_status = gsl_spline_init(LF_spline, lnMhalo_param, Muv_param, nbins); GSL_ERROR(gsl_status); lnMhalo_lo = log(Mhalo_min); lnMhalo_hi = log(Mhalo_max); dlnM = (lnMhalo_hi - lnMhalo_lo)/(double)(nbins - 1); // There is a kink on LFs at which Fstar crosses unity. This kink is a numerical artefact caused by the derivate of dMuvdMhalo. // Most of the cases the kink doesn't appear in magnitude ranges we are interested (e.g. -22 < Muv < -10). However, for some extreme // parameters, it appears. To avoid this kink, we use the interpolation of the derivate in the range where the kink appears. // 'i_unity' is the array number at which the kink appears. 'i_unity-3' and 'i_unity+12' are related to the range of interpolation, // which is an arbitrary choice. // NOTE: This method does NOT work in cases with ALPHA_STAR < -0.5. But, this parameter range is unphysical given that the // observational LFs favour positive ALPHA_STAR in this model. // i_smth = 0: calculates LFs without interpolation. // i_smth = 1: calculates LFs using interpolation where Fstar crosses unity. if (i_unity-3 < 0) i_smth = 0; else if (i_unity+12 > nbins-1) i_smth = 0; else i_smth = 1; if (i_smth == 0) { for (i=0; i<nbins; i++) { // calculate luminosity function lnMhalo_i = lnMhalo_lo + dlnM*(double)i; Mhalo_param[i] = exp(lnMhalo_i); M_h_z[i + i_z*nbins] = Mhalo_param[i]; Muv_1 = gsl_spline_eval(LF_spline, lnMhalo_i - delta_lnMhalo, LF_spline_acc); Muv_2 = gsl_spline_eval(LF_spline, lnMhalo_i + delta_lnMhalo, LF_spline_acc); dMuvdMhalo = (Muv_2 - Muv_1) / (2.*delta_lnMhalo * exp(lnMhalo_i)); if (component == 1) f_duty_upper = 1.; else f_duty_upper = exp(-(Mhalo_param[i]/Mcrit_atom)); if(mf==0) { log10phi[i + i_z*nbins] = log10( dNdM(growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) ); } else if(mf==1) { log10phi[i + i_z*nbins] = log10( dNdM_st(growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) ); } else if(mf==2) { log10phi[i + i_z*nbins] = log10( dNdM_WatsonFOF(growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) ); } else if(mf==3) { log10phi[i + i_z*nbins] = log10( dNdM_WatsonFOF_z(z_LF[i_z], growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) ); } else{ LOG_ERROR("HMF should be between 0-3, got %d", mf); Throw(ValueError); } if (isinf(log10phi[i + i_z*nbins]) || isnan(log10phi[i + i_z*nbins]) || log10phi[i + i_z*nbins] < -30.) 
log10phi[i + i_z*nbins] = -30.; } } else { lnM_temp = calloc(nbins_smth,sizeof(double)); deriv_temp = calloc(nbins_smth,sizeof(double)); deriv = calloc(nbins,sizeof(double)); for (i=0; i<nbins; i++) { // calculate luminosity function lnMhalo_i = lnMhalo_lo + dlnM*(double)i; Mhalo_param[i] = exp(lnMhalo_i); M_h_z[i + i_z*nbins] = Mhalo_param[i]; Muv_1 = gsl_spline_eval(LF_spline, lnMhalo_i - delta_lnMhalo, LF_spline_acc); Muv_2 = gsl_spline_eval(LF_spline, lnMhalo_i + delta_lnMhalo, LF_spline_acc); dMuvdMhalo = (Muv_2 - Muv_1) / (2.*delta_lnMhalo * exp(lnMhalo_i)); deriv[i] = fabs(dMuvdMhalo); } deriv_spline_acc = gsl_interp_accel_alloc(); deriv_spline = gsl_spline_alloc(gsl_interp_cspline, nbins_smth); // generate interpolation arrays to smooth discontinuity of the derivative causing a kink // Note that the number of array elements and the range of interpolation are made by arbitrary choices. lnM_temp[0] = lnMhalo_param[i_unity - 3]; lnM_temp[1] = lnMhalo_param[i_unity - 2]; lnM_temp[2] = lnMhalo_param[i_unity + 8]; lnM_temp[3] = lnMhalo_param[i_unity + 9]; lnM_temp[4] = lnMhalo_param[i_unity + 10]; lnM_temp[5] = lnMhalo_param[i_unity + 11]; lnM_temp[6] = lnMhalo_param[i_unity + 12]; deriv_temp[0] = deriv[i_unity - 3]; deriv_temp[1] = deriv[i_unity - 2]; deriv_temp[2] = deriv[i_unity + 8]; deriv_temp[3] = deriv[i_unity + 9]; deriv_temp[4] = deriv[i_unity + 10]; deriv_temp[5] = deriv[i_unity + 11]; deriv_temp[6] = deriv[i_unity + 12]; gsl_status = gsl_spline_init(deriv_spline, lnM_temp, deriv_temp, nbins_smth); GSL_ERROR(gsl_status); for (i=0;i<9;i++){ deriv[i_unity + i - 1] = gsl_spline_eval(deriv_spline, lnMhalo_param[i_unity + i - 1], deriv_spline_acc); } for (i=0; i<nbins; i++) { if (component == 1) f_duty_upper = 1.; else f_duty_upper = exp(-(Mhalo_param[i]/Mcrit_atom)); if(mf==0) dndm = dNdM(growthf, Mhalo_param[i]); else if(mf==1) dndm = dNdM_st(growthf, Mhalo_param[i]); else if(mf==2) dndm = dNdM_WatsonFOF(growthf, Mhalo_param[i]); else if(mf==3) dndm = dNdM_WatsonFOF_z(z_LF[i_z], growthf, Mhalo_param[i]); else{ LOG_ERROR("HMF should be between 0-3, got %d", mf); Throw(ValueError); } log10phi[i + i_z*nbins] = log10(dndm * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / deriv[i]); if (isinf(log10phi[i + i_z*nbins]) || isnan(log10phi[i + i_z*nbins]) || log10phi[i + i_z*nbins] < -30.) log10phi[i + i_z*nbins] = -30.; } } } cleanup_ComputeLF(); } // End try Catch(status){ return status; } return(0); } void initialiseGL_Nion_Xray(int n, float M_Min, float M_Max){ //calculates the weightings and the positions for Gauss-Legendre quadrature. 
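    /* The nodes xi_SFR_Xray[1..n] and weights wi_SFR_Xray[1..n] are set up once for the
       interval [ln M_Min, ln M_Max]; callers then approximate the mass integral as, roughly,
           sum = 0.;
           for (i = 1; i <= n; i++) sum += wi_SFR_Xray[i] * f(xi_SFR_Xray[i]);
       where f(lnM) is the conditional integrand (see GaussLegendreQuad_Nion_MINI / _Nion below).
       Sketch only; the 1-based indexing follows gauleg(). */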
gauleg(log(M_Min),log(M_Max),xi_SFR_Xray,wi_SFR_Xray,n); } float dNdM_conditional(float growthf, float M1, float M2, float delta1, float delta2, float sigma2){ float sigma1, dsigmadm,dsigma_val; float MassBinLow; int MassBin; if(user_params_ps->USE_INTERPOLATION_TABLES) { MassBin = (int)floor( (M1 - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma1 = Sigma_InterpTable[MassBin] + ( M1 - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; dsigma_val = dSigmadm_InterpTable[MassBin] + ( M1 - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = -pow(10.,dsigma_val); } else { sigma1 = sigma_z0(exp(M1)); dsigmadm = dsigmasqdm_z0(exp(M1)); } M1 = exp(M1); M2 = exp(M2); sigma1 = sigma1*sigma1; sigma2 = sigma2*sigma2; dsigmadm = dsigmadm/(2.0*sigma1); // This is actually sigma1^{2} as calculated above, however, it should just be sigma1. It cancels with the same factor below. Why I have decided to write it like that I don't know! if((sigma1 > sigma2)) { return -(( delta1 - delta2 )/growthf)*( 2.*sigma1*dsigmadm )*( exp( - ( delta1 - delta2 )*( delta1 - delta2 )/( 2.*growthf*growthf*( sigma1 - sigma2 ) ) ) )/(pow( sigma1 - sigma2, 1.5)); } else if(sigma1==sigma2) { return -(( delta1 - delta2 )/growthf)*( 2.*sigma1*dsigmadm )*( exp( - ( delta1 - delta2 )*( delta1 - delta2 )/( 2.*growthf*growthf*( 1.e-6 ) ) ) )/(pow( 1.e-6, 1.5)); } else { return 0.; } } void initialiseGL_Nion(int n, float M_Min, float M_Max){ //calculates the weightings and the positions for Gauss-Legendre quadrature. gauleg(log(M_Min),log(M_Max),xi_SFR,wi_SFR,n); } double dNion_ConditionallnM_MINI(double lnM, void *params) { struct parameters_gsl_SFR_con_int_ vals = *(struct parameters_gsl_SFR_con_int_ *)params; double M = exp(lnM); // linear scale double growthf = vals.gf_obs; double M2 = vals.Mval; // natural log scale double sigma2 = vals.sigma2; double del1 = vals.delta1; double del2 = vals.delta2; double MassTurnover = vals.Mdrop; double MassTurnover_upper = vals.Mdrop_upper; double Alpha_star = vals.pl_star; double Alpha_esc = vals.pl_esc; double Fstar7_MINI = vals.frac_star; double Fesc7_MINI = vals.frac_esc; double Mlim_Fstar = vals.LimitMass_Fstar; double Mlim_Fesc = vals.LimitMass_Fesc; double Fstar,Fesc; if (Alpha_star > 0. && M > Mlim_Fstar) Fstar = 1./Fstar7_MINI; else if (Alpha_star < 0. && M < Mlim_Fstar) Fstar = 1./Fstar7_MINI; else Fstar = pow(M/1e7,Alpha_star); if (Alpha_esc > 0. && M > Mlim_Fesc) Fesc = 1./Fesc7_MINI; else if (Alpha_esc < 0. && M < Mlim_Fesc) Fesc = 1./Fesc7_MINI; else Fesc = pow(M/1e7,Alpha_esc); return M*exp(-MassTurnover/M)*exp(-M/MassTurnover_upper)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI); } double dNion_ConditionallnM(double lnM, void *params) { struct parameters_gsl_SFR_con_int_ vals = *(struct parameters_gsl_SFR_con_int_ *)params; double M = exp(lnM); // linear scale double growthf = vals.gf_obs; double M2 = vals.Mval; // natural log scale double sigma2 = vals.sigma2; double del1 = vals.delta1; double del2 = vals.delta2; double MassTurnover = vals.Mdrop; double Alpha_star = vals.pl_star; double Alpha_esc = vals.pl_esc; double Fstar10 = vals.frac_star; double Fesc10 = vals.frac_esc; double Mlim_Fstar = vals.LimitMass_Fstar; double Mlim_Fesc = vals.LimitMass_Fesc; double Fstar,Fesc; if (Alpha_star > 0. && M > Mlim_Fstar) Fstar = 1./Fstar10; else if (Alpha_star < 0. 
&& M < Mlim_Fstar) Fstar = 1./Fstar10; else Fstar = pow(M/1e10,Alpha_star); if (Alpha_esc > 0. && M > Mlim_Fesc) Fesc = 1./Fesc10; else if (Alpha_esc < 0. && M < Mlim_Fesc) Fesc = 1./Fesc10; else Fesc = pow(M/1e10,Alpha_esc); return M*exp(-MassTurnover/M)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI); } double Nion_ConditionalM_MINI(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double MassTurnover_upper, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES) { if (FAST_FCOLL_TABLES) { //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff. return GaussLegendreQuad_Nion_MINI(0, 0, (float) growthf, (float) M2, (float) sigma2, (float) delta1, (float) delta2, (float) MassTurnover, (float) MassTurnover_upper, (float) Alpha_star, (float) Alpha_esc, (float) Fstar10, (float) Fesc10, (float) Mlim_Fstar, (float) Mlim_Fesc, FAST_FCOLL_TABLES); } else{ //standard old code double result, error, lower_limit, upper_limit; gsl_function F; double rel_tol = 0.01; //<- relative tolerance gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000); struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = { .gf_obs = growthf, .Mval = M2, .sigma2 = sigma2, .delta1 = delta1, .delta2 = delta2, .Mdrop = MassTurnover, .Mdrop_upper = MassTurnover_upper, .pl_star = Alpha_star, .pl_esc = Alpha_esc, .frac_star = Fstar10, .frac_esc = Fesc10, .LimitMass_Fstar = Mlim_Fstar, .LimitMass_Fesc = Mlim_Fesc }; F.function = &dNion_ConditionallnM_MINI; F.params = &parameters_gsl_SFR_con; lower_limit = M1; upper_limit = M2; gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error); gsl_integration_workspace_free (w); if(delta2 > delta1) { result = 1.; return result; } else { return result; } } } double Nion_ConditionalM(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES) { if (FAST_FCOLL_TABLES) { //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff. 
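        /* With FAST_FCOLL_TABLES the exp(-MassTurnover/M) suppression in the integrand is
           replaced by a sharp cut at M = MassTurnover, and the conditional integral is then
           evaluated analytically via incomplete-gamma pieces (Fcollapprox) inside
           GaussLegendreQuad_Nion (when global_params.USE_FAST_ATOMIC is also set), rather
           than by the Gauss-Legendre sum. */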
return GaussLegendreQuad_Nion(0, 0, (float) growthf, (float) M2, (float) sigma2, (float) delta1, (float) delta2, (float) MassTurnover, (float) Alpha_star, (float) Alpha_esc, (float) Fstar10, (float) Fesc10, (float) Mlim_Fstar, (float) Mlim_Fesc, FAST_FCOLL_TABLES); } else{ //standard double result, error, lower_limit, upper_limit; gsl_function F; double rel_tol = 0.01; //<- relative tolerance gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000); struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = { .gf_obs = growthf, .Mval = M2, .sigma2 = sigma2, .delta1 = delta1, .delta2 = delta2, .Mdrop = MassTurnover, .pl_star = Alpha_star, .pl_esc = Alpha_esc, .frac_star = Fstar10, .frac_esc = Fesc10, .LimitMass_Fstar = Mlim_Fstar, .LimitMass_Fesc = Mlim_Fesc }; F.function = &dNion_ConditionallnM; F.params = &parameters_gsl_SFR_con; lower_limit = M1; upper_limit = M2; int status; gsl_set_error_handler_off(); status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error); if(status!=0) { LOG_ERROR("(function argument): %e %e %e %e %e\n",lower_limit,upper_limit,rel_tol,result,error); LOG_ERROR("data: %e %e %e %e %e %e %e %e %e %e %e %e %e\n",growthf,M1,M2,sigma2,delta1,delta2,MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc); Throw GSLError; } gsl_integration_workspace_free (w); if(delta2 > delta1) { result = 1.; return result; } else { return result; } } } float Nion_ConditionallnM_GL_MINI(float lnM, struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con){ float M = exp(lnM); float growthf = parameters_gsl_SFR_con.gf_obs; float M2 = parameters_gsl_SFR_con.Mval; float sigma2 = parameters_gsl_SFR_con.sigma2; float del1 = parameters_gsl_SFR_con.delta1; float del2 = parameters_gsl_SFR_con.delta2; float MassTurnover = parameters_gsl_SFR_con.Mdrop; float MassTurnover_upper = parameters_gsl_SFR_con.Mdrop_upper; float Alpha_star = parameters_gsl_SFR_con.pl_star; float Alpha_esc = parameters_gsl_SFR_con.pl_esc; float Fstar7_MINI = parameters_gsl_SFR_con.frac_star; float Fesc7_MINI = parameters_gsl_SFR_con.frac_esc; float Mlim_Fstar = parameters_gsl_SFR_con.LimitMass_Fstar; float Mlim_Fesc = parameters_gsl_SFR_con.LimitMass_Fesc; float Fstar,Fesc; if (Alpha_star > 0. && M > Mlim_Fstar) Fstar = 1./Fstar7_MINI; else if (Alpha_star < 0. && M < Mlim_Fstar) Fstar = 1./Fstar7_MINI; else Fstar = pow(M/1e7,Alpha_star); if (Alpha_esc > 0. && M > Mlim_Fesc) Fesc = 1./Fesc7_MINI; else if (Alpha_esc < 0. && M < Mlim_Fesc) Fesc = 1./Fesc7_MINI; else Fesc = pow(M/1e7,Alpha_esc); return M*exp(-MassTurnover/M)*exp(-M/MassTurnover_upper)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI); } float Nion_ConditionallnM_GL(float lnM, struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con){ float M = exp(lnM); float growthf = parameters_gsl_SFR_con.gf_obs; float M2 = parameters_gsl_SFR_con.Mval; float sigma2 = parameters_gsl_SFR_con.sigma2; float del1 = parameters_gsl_SFR_con.delta1; float del2 = parameters_gsl_SFR_con.delta2; float MassTurnover = parameters_gsl_SFR_con.Mdrop; float Alpha_star = parameters_gsl_SFR_con.pl_star; float Alpha_esc = parameters_gsl_SFR_con.pl_esc; float Fstar10 = parameters_gsl_SFR_con.frac_star; float Fesc10 = parameters_gsl_SFR_con.frac_esc; float Mlim_Fstar = parameters_gsl_SFR_con.LimitMass_Fstar; float Mlim_Fesc = parameters_gsl_SFR_con.LimitMass_Fesc; float Fstar,Fesc; if (Alpha_star > 0. && M > Mlim_Fstar) Fstar = 1./Fstar10; else if (Alpha_star < 0. 
&& M < Mlim_Fstar) Fstar = 1./Fstar10; else Fstar = pow(M/1e10,Alpha_star); if (Alpha_esc > 0. && M > Mlim_Fesc) Fesc = 1./Fesc10; else if (Alpha_esc < 0. && M < Mlim_Fesc) Fesc = 1./Fesc10; else Fesc = pow(M/1e10,Alpha_esc); return M*exp(-MassTurnover/M)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI); } //JBM: Same as above but for minihaloes. Has two cutoffs, lower and upper. float GaussLegendreQuad_Nion_MINI(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float MassTurnover_upper, float Alpha_star, float Alpha_esc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES) { double result, nu_lower_limit, nu_higher_limit, nupivot; int i; double integrand, x; integrand = 0.; struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = { .gf_obs = growthf, .Mval = M2, .sigma2 = sigma2, .delta1 = delta1, .delta2 = delta2, .Mdrop = MassTurnover, .Mdrop_upper = MassTurnover_upper, .pl_star = Alpha_star, .pl_esc = Alpha_esc, .frac_star = Fstar7_MINI, .frac_esc = Fesc7_MINI, .LimitMass_Fstar = Mlim_Fstar_MINI, .LimitMass_Fesc = Mlim_Fesc_MINI }; if(delta2 > delta1*0.9999) { result = 1.; return result; } if(FAST_FCOLL_TABLES){ //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff. if(MassTurnover_upper <= MassTurnover){ return 1e-40; //in sharp cut it's zero } double delta_arg = pow( (delta1 - delta2)/growthf , 2.); double LogMass=log(MassTurnover); int MassBin = (int)floor( (LogMass - MinMass )*inv_mass_bin_width ); double MassBinLow = MinMass + mass_bin_width*(double)MassBin; double sigmaM1 = Sigma_InterpTable[MassBin] + ( LogMass - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; nu_lower_limit = delta_arg/(sigmaM1 * sigmaM1 - sigma2 * sigma2); LogMass = log(MassTurnover_upper); MassBin = (int)floor( (LogMass - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(double)MassBin; double sigmaM2 = Sigma_InterpTable[MassBin] + ( LogMass - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; nu_higher_limit = delta_arg/(sigmaM2*sigmaM2-sigma2*sigma2); //note we keep nupivot1 just in case very negative delta makes it reach that nu LogMass = log(MPIVOT1); //jbm could be done outside and it'd be even faster int MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width ); double MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot; double sigmapivot1 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width; double nupivot1 = delta_arg/(sigmapivot1*sigmapivot1); //note, it does not have the sigma2 on purpose. 
LogMass = log(MPIVOT2); //jbm could be done outside and it'd be even faster MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width ); MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot; double sigmapivot2 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width; double nupivot2 = delta_arg/(sigmapivot2*sigmapivot2); double beta1 = (Alpha_star+Alpha_esc) * AINDEX1 * (0.5); //exponent for Fcollapprox for nu>nupivot1 (large M) double beta2 = (Alpha_star+Alpha_esc) * AINDEX2 * (0.5); //exponent for Fcollapprox for nupivot1>nu>nupivot2 (small M) double beta3 = (Alpha_star+Alpha_esc) * AINDEX3 * (0.5); //exponent for Fcollapprox for nu<nupivot2 (smallest M) //beta2 fixed by continuity. // // 3PLs double fcollres=0.0; double fcollres_high=0.0; //for the higher threshold to subtract // re-written for further speedups if (nu_higher_limit <= nupivot2){ //if both are below pivot2 don't bother adding and subtracting the high contribution fcollres=(Fcollapprox(nu_lower_limit,beta3))*pow(nupivot2,-beta3); fcollres_high=(Fcollapprox(nu_higher_limit,beta3))*pow(nupivot2,-beta3); } else { fcollres_high=(Fcollapprox(nu_higher_limit,beta2))*pow(nupivot1,-beta2); if (nu_lower_limit > nupivot2){ fcollres=(Fcollapprox(nu_lower_limit,beta2))*pow(nupivot1,-beta2); } else { fcollres=(Fcollapprox(nupivot2,beta2))*pow(nupivot1,-beta2); fcollres+=(Fcollapprox(nu_lower_limit,beta3)-Fcollapprox(nupivot2,beta3) )*pow(nupivot2,-beta3); } } if (fcollres < fcollres_high){ return 1e-40; } return (fcollres-fcollres_high); } else{ for(i=1; i<(n+1); i++){ if(Type==1) { x = xi_SFR_Xray[i]; integrand += wi_SFR_Xray[i]*Nion_ConditionallnM_GL_MINI(x,parameters_gsl_SFR_con); } if(Type==0) { x = xi_SFR[i]; integrand += wi_SFR[i]*Nion_ConditionallnM_GL_MINI(x,parameters_gsl_SFR_con); } } return integrand; } } //JBM: Added the approximation if user_params->FAST_FCOLL_TABLES==True float GaussLegendreQuad_Nion(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, bool FAST_FCOLL_TABLES) { //Performs the Gauss-Legendre quadrature. int i; double result, nu_lower_limit, nupivot; if(delta2 > delta1*0.9999) { result = 1.; return result; } double integrand, x; integrand = 0.; struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = { .gf_obs = growthf, .Mval = M2, .sigma2 = sigma2, .delta1 = delta1, .delta2 = delta2, .Mdrop = MassTurnover, .pl_star = Alpha_star, .pl_esc = Alpha_esc, .frac_star = Fstar10, .frac_esc = Fesc10, .LimitMass_Fstar = Mlim_Fstar, .LimitMass_Fesc = Mlim_Fesc }; if (FAST_FCOLL_TABLES && global_params.USE_FAST_ATOMIC){ //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff. 
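        /* Sketch of the approximation in this branch: the EPS variable is
             nu(M) = [ (delta1 - delta2)/growthf ]^2 / ( sigma^2(M) - sigma2^2 ),
           and the M^(Alpha_star+Alpha_esc) efficiency is folded in by treating sigma(M) as a
           piecewise power law around MPIVOT1/MPIVOT2, so that M^(Alpha_star+Alpha_esc) ~ nu^beta_k
           with beta_k = (Alpha_star+Alpha_esc)*AINDEX_k/2 (corresponding, roughly, to
           sigma(M) ~ M^(-1/AINDEX_k) in each regime). Each piece is then integrated
           analytically with Fcollapprox(). */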
double delta_arg = pow( (delta1 - delta2)/growthf , 2.0); double LogMass=log(MassTurnover); int MassBin = (int)floor( (LogMass - MinMass )*inv_mass_bin_width ); double MassBinLow = MinMass + mass_bin_width*(double)MassBin; double sigmaM1 = Sigma_InterpTable[MassBin] + ( LogMass - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; nu_lower_limit = delta_arg/(sigmaM1*sigmaM1-sigma2*sigma2); LogMass = log(MPIVOT1); //jbm could be done outside and it'd be even faster int MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width ); double MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot; double sigmapivot1 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width; double nupivot1 = delta_arg/(sigmapivot1*sigmapivot1); //note, it does not have the sigma2 on purpose. LogMass = log(MPIVOT2); //jbm could be done outside and it'd be even faster MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width ); MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot; double sigmapivot2 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width; double nupivot2 = delta_arg/(sigmapivot2*sigmapivot2); double beta1 = (Alpha_star+Alpha_esc) * AINDEX1 * (0.5); //exponent for Fcollapprox for nu>nupivot1 (large M) double beta2 = (Alpha_star+Alpha_esc) * AINDEX2 * (0.5); //exponent for Fcollapprox for nupivot2<nu<nupivot1 (small M) double beta3 = (Alpha_star+Alpha_esc) * AINDEX3 * (0.5); //exponent for Fcollapprox for nu<nupivot2 (smallest M) //beta2 fixed by continuity. double nucrit_sigma2 = delta_arg*pow(sigma2+1e-10,-2.0); //above this nu sigma2>sigma1, so HMF=0. eps added to avoid infinities // // 3PLs double fcollres=0.0; if(nu_lower_limit >= nucrit_sigma2){ //fully in the flat part of sigma(nu), M^alpha is nu-independent. return 1e-40; } else{ //we subtract the contribution from high nu, since the HMF is set to 0 if sigma2>sigma1 fcollres -= Fcollapprox(nucrit_sigma2,beta1)*pow(nupivot1,-beta1); } if(nu_lower_limit >= nupivot1){ fcollres+=Fcollapprox(nu_lower_limit,beta1)*pow(nupivot1,-beta1); } else{ fcollres+=Fcollapprox(nupivot1,beta1)*pow(nupivot1,-beta1); if (nu_lower_limit > nupivot2){ fcollres+=(Fcollapprox(nu_lower_limit,beta2)-Fcollapprox(nupivot1,beta2))*pow(nupivot1,-beta2); } else { fcollres+=(Fcollapprox(nupivot2,beta2)-Fcollapprox(nupivot1,beta2) )*pow(nupivot1,-beta2); fcollres+=(Fcollapprox(nu_lower_limit,beta3)-Fcollapprox(nupivot2,beta3) )*pow(nupivot2,-beta3); } } if (fcollres<=0.0){ LOG_DEBUG("Negative fcoll? fc=%.1le Mt=%.1le \n",fcollres, MassTurnover); fcollres=1e-40; } return fcollres; } else{ for(i=1; i<(n+1); i++){ if(Type==1) { x = xi_SFR_Xray[i]; integrand += wi_SFR_Xray[i]*Nion_ConditionallnM_GL(x,parameters_gsl_SFR_con); } if(Type==0) { x = xi_SFR[i]; integrand += wi_SFR[i]*Nion_ConditionallnM_GL(x,parameters_gsl_SFR_con); } } return integrand; } } #include <gsl/gsl_sf_gamma.h> //JBM: Integral of a power-law times exponential for EPS: \int dnu nu^beta * exp(-nu/2)/sqrt(nu) from numin to infty. 
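/* Worked form of the integral quoted above (sketch; gsl_sf_gamma_inc(a,x) is the unnormalised
   upper incomplete gamma Gamma(a,x)):
     \int_{numin}^{\infty} nu^{beta - 1/2} * exp(-nu/2) dnu
         = 2^{beta + 1/2} * Gamma(beta + 1/2, numin/2)      (substituting t = nu/2)
   and Fcollapprox() below returns this divided by sqrt(2*PI). */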
double Fcollapprox (double numin, double beta){ //nu is deltacrit^2/sigma^2, corrected by delta(R) and sigma(R) double gg = gsl_sf_gamma_inc(0.5+beta,0.5*numin); return gg*pow(2,0.5+beta)*pow(2.0*PI,-0.5); } void initialise_Nion_General_spline(float z, float min_density, float max_density, float Mmax, float MassTurnover, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, bool FAST_FCOLL_TABLES){ float Mmin = MassTurnover/50.; double overdense_val, growthf, sigma2; double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION*0.999; double overdense_small_high, overdense_small_low; int i; float ln_10; if(max_density > global_params.CRIT_DENS_TRANSITION*1.001) { overdense_small_high = global_params.CRIT_DENS_TRANSITION*1.001; } else { overdense_small_high = max_density; } overdense_small_low = min_density; ln_10 = log(10); float MassBinLow; int MassBin; growthf = dicke(z); Mmin = log(Mmin); Mmax = log(Mmax); MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; #pragma omp parallel shared(log10_overdense_spline_SFR,log10_Nion_spline,overdense_small_low,overdense_small_high,growthf,Mmax,sigma2,MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc) private(i,overdense_val) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<NSFR_low; i++){ overdense_val = log10(1. + overdense_small_low) + (double)i/((double)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low)); log10_overdense_spline_SFR[i] = overdense_val; log10_Nion_spline[i] = GaussLegendreQuad_Nion(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,pow(10.,overdense_val)-1.,MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES); if(fabs(log10_Nion_spline[i]) < 1e-38) { log10_Nion_spline[i] = 1e-38; } log10_Nion_spline[i] = log10(log10_Nion_spline[i]); if(log10_Nion_spline[i] < -40.){ log10_Nion_spline[i] = -40.; } log10_Nion_spline[i] *= ln_10; } } for (i=0; i<NSFR_low; i++){ if(!isfinite(log10_Nion_spline[i])) { LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline"); Throw(ParameterError); } } #pragma omp parallel shared(Overdense_spline_SFR,Nion_spline,overdense_large_low,overdense_large_high,growthf,Mmin,Mmax,sigma2,MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc) private(i) num_threads(user_params_ps->N_THREADS) { #pragma omp for for(i=0;i<NSFR_high;i++) { Overdense_spline_SFR[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low); Nion_spline[i] = Nion_ConditionalM(growthf,Mmin,Mmax,sigma2,Deltac,Overdense_spline_SFR[i],MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES); if(Nion_spline[i]<0.) 
{ Nion_spline[i]=pow(10.,-40.0); } } } for(i=0;i<NSFR_high;i++) { if(!isfinite(Nion_spline[i])) { LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline"); Throw(ParameterError); } } } void initialise_Nion_General_spline_MINI(float z, float Mcrit_atom, float min_density, float max_density, float Mmax, float Mmin, float log10Mturn_min, float log10Mturn_max, float log10Mturn_min_MINI, float log10Mturn_max_MINI, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES){ double growthf, sigma2; double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION*0.999; double overdense_small_high, overdense_small_low; int i,j; float ln_10; if(max_density > global_params.CRIT_DENS_TRANSITION*1.001) { overdense_small_high = global_params.CRIT_DENS_TRANSITION*1.001; } else { overdense_small_high = max_density; } overdense_small_low = min_density; ln_10 = log(10); float MassBinLow; int MassBin; growthf = dicke(z); Mmin = log(Mmin); Mmax = log(Mmax); MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; for (i=0; i<NSFR_low; i++){ log10_overdense_spline_SFR[i] = log10(1. + overdense_small_low) + (double)i/((double)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low)); } for (i=0;i<NSFR_high;i++) { Overdense_spline_SFR[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low); } for (i=0;i<NMTURN;i++){ Mturns[i] = pow(10., log10Mturn_min + (float)i/((float)NMTURN-1.)*(log10Mturn_max-log10Mturn_min)); Mturns_MINI[i] = pow(10., log10Mturn_min_MINI + (float)i/((float)NMTURN-1.)*(log10Mturn_max_MINI-log10Mturn_min_MINI)); } #pragma omp parallel shared(log10_Nion_spline,growthf,Mmax,sigma2,log10_overdense_spline_SFR,Mturns,Mturns_MINI,\ Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,ln_10,log10_Nion_spline_MINI,Mcrit_atom,\ Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \ private(i,j) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<NSFR_low; i++){ for (j=0; j<NMTURN; j++){ log10_Nion_spline[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,\ pow(10.,log10_overdense_spline_SFR[i])-1.,Mturns[j],Alpha_star,\ Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES)); if(log10_Nion_spline[i+j*NSFR_low] < -40.){ log10_Nion_spline[i+j*NSFR_low] = -40.; } log10_Nion_spline[i+j*NSFR_low] *= ln_10; log10_Nion_spline_MINI[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion_MINI(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,\ pow(10.,log10_overdense_spline_SFR[i])-1.,Mturns_MINI[j],Mcrit_atom,\ Alpha_star,Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES)); if(log10_Nion_spline_MINI[i+j*NSFR_low] < -40.){ log10_Nion_spline_MINI[i+j*NSFR_low] = -40.; } log10_Nion_spline_MINI[i+j*NSFR_low] *= ln_10; } } } for (i=0; i<NSFR_low; i++){ for (j=0; j<NMTURN; j++){ if(isfinite(log10_Nion_spline[i+j*NSFR_low])==0) { LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline"); Throw(ParameterError); } if(isfinite(log10_Nion_spline_MINI[i+j*NSFR_low])==0) { LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline_MINI"); Throw(ParameterError); } } 
} #pragma omp parallel shared(Nion_spline,growthf,Mmin,Mmax,sigma2,Overdense_spline_SFR,Mturns,Alpha_star,\ Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,Nion_spline_MINI,Mturns_MINI,Mcrit_atom,\ Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \ private(i,j) num_threads(user_params_ps->N_THREADS) { #pragma omp for for(i=0;i<NSFR_high;i++) { for (j=0; j<NMTURN; j++){ Nion_spline[i+j*NSFR_high] = Nion_ConditionalM( growthf,Mmin,Mmax,sigma2,Deltac,Overdense_spline_SFR[i], Mturns[j],Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES ); if(Nion_spline[i+j*NSFR_high]<0.) { Nion_spline[i+j*NSFR_high]=pow(10.,-40.0); } Nion_spline_MINI[i+j*NSFR_high] = Nion_ConditionalM_MINI( growthf,Mmin,Mmax,sigma2,Deltac,Overdense_spline_SFR[i], Mturns_MINI[j],Mcrit_atom,Alpha_star,Alpha_esc,Fstar7_MINI,Fesc7_MINI, Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES ); if(Nion_spline_MINI[i+j*NSFR_high]<0.) { Nion_spline_MINI[i+j*NSFR_high]=pow(10.,-40.0); } } } } for(i=0;i<NSFR_high;i++) { for (j=0; j<NMTURN; j++){ if(isfinite(Nion_spline[i+j*NSFR_high])==0) { LOG_ERROR("Detected either an infinite or NaN value in Nion_spline"); Throw(ParameterError); } if(isfinite(Nion_spline_MINI[i+j*NSFR_high])==0) { LOG_ERROR("Detected either an infinite or NaN value in Nion_spline_MINI"); Throw(ParameterError); } } } } void initialise_Nion_General_spline_MINI_prev(float z, float Mcrit_atom, float min_density, float max_density, float Mmax, float Mmin, float log10Mturn_min, float log10Mturn_max, float log10Mturn_min_MINI, float log10Mturn_max_MINI, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES){ double growthf, sigma2; double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION*0.999; double overdense_small_high, overdense_small_low; int i,j; float ln_10; if(max_density > global_params.CRIT_DENS_TRANSITION*1.001) { overdense_small_high = global_params.CRIT_DENS_TRANSITION*1.001; } else { overdense_small_high = max_density; } overdense_small_low = min_density; ln_10 = log(10); float MassBinLow; int MassBin; growthf = dicke(z); Mmin = log(Mmin); Mmax = log(Mmax); MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; for (i=0; i<NSFR_low; i++){ prev_log10_overdense_spline_SFR[i] = log10(1. 
+ overdense_small_low) + (double)i/((double)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low)); } for (i=0;i<NSFR_high;i++) { prev_Overdense_spline_SFR[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low); } for (i=0;i<NMTURN;i++){ Mturns[i] = pow(10., log10Mturn_min + (float)i/((float)NMTURN-1.)*(log10Mturn_max-log10Mturn_min)); Mturns_MINI[i] = pow(10., log10Mturn_min_MINI + (float)i/((float)NMTURN-1.)*(log10Mturn_max_MINI-log10Mturn_min_MINI)); } #pragma omp parallel shared(prev_log10_Nion_spline,growthf,Mmax,sigma2,prev_log10_overdense_spline_SFR,Mturns,Alpha_star,\ Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,prev_log10_Nion_spline_MINI,Mturns_MINI,Mcrit_atom,\ Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \ private(i,j) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<NSFR_low; i++){ for (j=0; j<NMTURN; j++){ prev_log10_Nion_spline[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,\ pow(10.,prev_log10_overdense_spline_SFR[i])-1.,Mturns[j],\ Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES)); if(prev_log10_Nion_spline[i+j*NSFR_low] < -40.){ prev_log10_Nion_spline[i+j*NSFR_low] = -40.; } prev_log10_Nion_spline[i+j*NSFR_low] *= ln_10; prev_log10_Nion_spline_MINI[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion_MINI(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,\ pow(10.,prev_log10_overdense_spline_SFR[i])-1.,Mturns_MINI[j],Mcrit_atom,\ Alpha_star,Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES)); if(prev_log10_Nion_spline_MINI[i+j*NSFR_low] < -40.){ prev_log10_Nion_spline_MINI[i+j*NSFR_low] = -40.; } prev_log10_Nion_spline_MINI[i+j*NSFR_low] *= ln_10; } } } for (i=0; i<NSFR_low; i++){ for (j=0; j<NMTURN; j++){ if(isfinite(prev_log10_Nion_spline[i+j*NSFR_low])==0) { LOG_ERROR("Detected either an infinite or NaN value in prev_log10_Nion_spline"); Throw(ParameterError); } if(isfinite(prev_log10_Nion_spline_MINI[i+j*NSFR_low])==0) { LOG_ERROR("Detected either an infinite or NaN value in prev_log10_Nion_spline_MINI"); Throw(ParameterError); } } } #pragma omp parallel shared(prev_Nion_spline,growthf,Mmin,Mmax,sigma2,prev_Overdense_spline_SFR,Mturns,\ Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,prev_Nion_spline_MINI,Mturns_MINI,\ Mcrit_atom,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \ private(i,j) num_threads(user_params_ps->N_THREADS) { #pragma omp for for(i=0;i<NSFR_high;i++) { for (j=0; j<NMTURN; j++){ prev_Nion_spline[i+j*NSFR_high] = Nion_ConditionalM(growthf,Mmin,Mmax,sigma2,Deltac,prev_Overdense_spline_SFR[i],\ Mturns[j],Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES); if(prev_Nion_spline[i+j*NSFR_high]<0.) { prev_Nion_spline[i+j*NSFR_high]=pow(10.,-40.0); } prev_Nion_spline_MINI[i+j*NSFR_high] = Nion_ConditionalM_MINI(growthf,Mmin,Mmax,sigma2,Deltac,\ prev_Overdense_spline_SFR[i],Mturns_MINI[j],Mcrit_atom,Alpha_star,\ Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES); if(prev_Nion_spline_MINI[i+j*NSFR_high]<0.) 
{ prev_Nion_spline_MINI[i+j*NSFR_high]=pow(10.,-40.0); } } } } for(i=0;i<NSFR_high;i++) { for (j=0; j<NMTURN; j++){ if(isfinite(prev_Nion_spline[i+j*NSFR_high])==0) { LOG_ERROR("Detected either an infinite or NaN value in prev_Nion_spline"); Throw(ParameterError); } if(isfinite(prev_Nion_spline_MINI[i+j*NSFR_high])==0) { LOG_ERROR("Detected either an infinite or NaN value in prev_Nion_spline_MINI"); Throw(ParameterError); } } } } void initialise_Nion_Ts_spline( int Nbin, float zmin, float zmax, float MassTurn, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10 ){ int i; float Mmin = MassTurn/50., Mmax = global_params.M_MAX_INTEGRAL; float Mlim_Fstar, Mlim_Fesc; if (z_val == NULL){ z_val = calloc(Nbin,sizeof(double)); Nion_z_val = calloc(Nbin,sizeof(double)); } Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10); Mlim_Fesc = Mass_limit_bisection(Mmin, Mmax, Alpha_esc, Fesc10); #pragma omp parallel shared(z_val,Nion_z_val,zmin,zmax, MassTurn, Alpha_star, Alpha_esc, Fstar10, Fesc10, Mlim_Fstar, Mlim_Fesc) private(i) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<Nbin; i++){ z_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin); Nion_z_val[i] = Nion_General(z_val[i], Mmin, MassTurn, Alpha_star, Alpha_esc, Fstar10, Fesc10, Mlim_Fstar, Mlim_Fesc); } } for (i=0; i<Nbin; i++){ if(isfinite(Nion_z_val[i])==0) { LOG_ERROR("Detected either an infinite or NaN value in Nion_z_val"); Throw(ParameterError); } } } void initialise_Nion_Ts_spline_MINI( int Nbin, float zmin, float zmax, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Fstar7_MINI, float Fesc7_MINI ){ int i,j; float Mmin = global_params.M_MIN_INTEGRAL, Mmax = global_params.M_MAX_INTEGRAL; float Mlim_Fstar, Mlim_Fesc, Mlim_Fstar_MINI, Mlim_Fesc_MINI, Mcrit_atom_val; if (z_val == NULL){ z_val = calloc(Nbin,sizeof(double)); Nion_z_val = calloc(Nbin,sizeof(double)); Nion_z_val_MINI = calloc(Nbin*NMTURN,sizeof(double)); } Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10); Mlim_Fesc = Mass_limit_bisection(Mmin, Mmax, Alpha_esc, Fesc10); Mlim_Fstar_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar7_MINI * pow(1e3, Alpha_star)); Mlim_Fesc_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_esc, Fesc7_MINI * pow(1e3, Alpha_esc)); float MassTurnover[NMTURN]; for (i=0;i<NMTURN;i++){ MassTurnover[i] = pow(10., LOG10_MTURN_MIN + (float)i/((float)NMTURN-1.)*(LOG10_MTURN_MAX-LOG10_MTURN_MIN)); } #pragma omp parallel shared(z_val,Nion_z_val,Nbin,zmin,zmax,Mmin,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,\ Nion_z_val_MINI,MassTurnover,Fstar7_MINI, Fesc7_MINI, Mlim_Fstar_MINI, Mlim_Fesc_MINI) \ private(i,j,Mcrit_atom_val) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<Nbin; i++){ z_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin); Mcrit_atom_val = atomic_cooling_threshold(z_val[i]); Nion_z_val[i] = Nion_General(z_val[i], Mmin, Mcrit_atom_val, Alpha_star, Alpha_esc, Fstar10, Fesc10, Mlim_Fstar, Mlim_Fesc); for (j=0; j<NMTURN; j++){ Nion_z_val_MINI[i+j*Nbin] = Nion_General_MINI(z_val[i], Mmin, MassTurnover[j], Mcrit_atom_val, Alpha_star, Alpha_esc, Fstar7_MINI, Fesc7_MINI, Mlim_Fstar_MINI, Mlim_Fesc_MINI); } } } for (i=0; i<Nbin; i++){ if(isfinite(Nion_z_val[i])==0) { i = Nbin; LOG_ERROR("Detected either an infinite or NaN value in Nion_z_val"); Throw(ParameterError); } for (j=0; j<NMTURN; j++){ if(isfinite(Nion_z_val_MINI[i+j*Nbin])==0){ j = NMTURN; LOG_ERROR("Detected either an infinite or NaN value in Nion_z_val_MINI"); 
Throw(ParameterError); } } } } void initialise_SFRD_spline(int Nbin, float zmin, float zmax, float MassTurn, float Alpha_star, float Fstar10){ int i; float Mmin = MassTurn/50., Mmax = global_params.M_MAX_INTEGRAL; float Mlim_Fstar; if (z_X_val == NULL){ z_X_val = calloc(Nbin,sizeof(double)); SFRD_val = calloc(Nbin,sizeof(double)); } Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10); #pragma omp parallel shared(z_X_val,SFRD_val,zmin,zmax, MassTurn, Alpha_star, Fstar10, Mlim_Fstar) private(i) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<Nbin; i++){ z_X_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin); SFRD_val[i] = Nion_General(z_X_val[i], Mmin, MassTurn, Alpha_star, 0., Fstar10, 1.,Mlim_Fstar,0.); } } for (i=0; i<Nbin; i++){ if(isfinite(SFRD_val[i])==0) { LOG_ERROR("Detected either an infinite or NaN value in SFRD_val"); Throw(ParameterError); } } } void initialise_SFRD_spline_MINI(int Nbin, float zmin, float zmax, float Alpha_star, float Fstar10, float Fstar7_MINI){ int i,j; float Mmin = global_params.M_MIN_INTEGRAL, Mmax = global_params.M_MAX_INTEGRAL; float Mlim_Fstar, Mlim_Fstar_MINI, Mcrit_atom_val; if (z_X_val == NULL){ z_X_val = calloc(Nbin,sizeof(double)); SFRD_val = calloc(Nbin,sizeof(double)); SFRD_val_MINI = calloc(Nbin*NMTURN,sizeof(double)); } Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10); Mlim_Fstar_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar7_MINI * pow(1e3, Alpha_star)); float MassTurnover[NMTURN]; for (i=0;i<NMTURN;i++){ MassTurnover[i] = pow(10., LOG10_MTURN_MIN + (float)i/((float)NMTURN-1.)*(LOG10_MTURN_MAX-LOG10_MTURN_MIN)); } #pragma omp parallel shared(z_X_val,zmin,zmax,Nbin,SFRD_val,Mmin, Alpha_star,Fstar10,Mlim_Fstar,\ SFRD_val_MINI,MassTurnover,Fstar7_MINI,Mlim_Fstar_MINI) \ private(i,j,Mcrit_atom_val) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<Nbin; i++){ z_X_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin); Mcrit_atom_val = atomic_cooling_threshold(z_X_val[i]); SFRD_val[i] = Nion_General(z_X_val[i], Mmin, Mcrit_atom_val, Alpha_star, 0., Fstar10, 1.,Mlim_Fstar,0.); for (j=0; j<NMTURN; j++){ SFRD_val_MINI[i+j*Nbin] = Nion_General_MINI(z_X_val[i], Mmin, MassTurnover[j], Mcrit_atom_val, Alpha_star, 0., Fstar7_MINI, 1.,Mlim_Fstar_MINI,0.); } } } for (i=0; i<Nbin; i++){ if(isfinite(SFRD_val[i])==0) { i = Nbin; LOG_ERROR("Detected either an infinite or NaN value in SFRD_val"); Throw(ParameterError); } for (j=0; j<NMTURN; j++){ if(isfinite(SFRD_val_MINI[i+j*Nbin])==0) { j = NMTURN; LOG_ERROR("Detected either an infinite or NaN value in SFRD_val_MINI"); Throw(ParameterError); } } } } void initialise_SFRD_Conditional_table( int Nfilter, float min_density[], float max_density[], float growthf[], float R[], float MassTurnover, float Alpha_star, float Fstar10, bool FAST_FCOLL_TABLES ){ double overdense_val; double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION; double overdense_small_high, overdense_small_low; float Mmin,Mmax,Mlim_Fstar,sigma2; int i,j,k,i_tot; float ln_10; ln_10 = log(10); Mmin = MassTurnover/50.; Mmax = RtoM(R[Nfilter-1]); Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10); Mmin = log(Mmin); for (i=0; i<NSFR_high;i++) { overdense_high_table[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low); } float MassBinLow; int MassBin; for (j=0; j < Nfilter; j++) { Mmax = RtoM(R[j]); initialiseGL_Nion_Xray(NGL_SFR, MassTurnover/50., Mmax); Mmax 
= log(Mmax); MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; if(min_density[j]*growthf[j] < -1.) { overdense_small_low = -1. + global_params.MIN_DENSITY_LOW_LIMIT; } else { overdense_small_low = min_density[j]*growthf[j]; } overdense_small_high = max_density[j]*growthf[j]; if(overdense_small_high > global_params.CRIT_DENS_TRANSITION) { overdense_small_high = global_params.CRIT_DENS_TRANSITION; } for (i=0; i<NSFR_low; i++) { overdense_val = log10(1. + overdense_small_low) + (float)i/((float)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low)); overdense_low_table[i] = pow(10.,overdense_val); } #pragma omp parallel shared(log10_SFRD_z_low_table,growthf,Mmax,sigma2,overdense_low_table,MassTurnover,Alpha_star,Fstar10,Mlim_Fstar) private(i) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<NSFR_low; i++){ log10_SFRD_z_low_table[j][i] = GaussLegendreQuad_Nion(1,NGL_SFR,growthf[j],Mmax,sigma2,Deltac,overdense_low_table[i]-1.,MassTurnover,Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES); // printf("%d %d %e %e %e %e %e %e %e %e %e %e %e %e %e %e %e\n",j,i,R[j],RtoM(R[j]),growthf[j],Mmax,sigma2,Deltac,overdense_low_table[i]-1.,MassTurnover,Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0.,log10_SFRD_z_low_table[j][i]); if(fabs(log10_SFRD_z_low_table[j][i]) < 1e-38) { log10_SFRD_z_low_table[j][i] = 1e-38; } log10_SFRD_z_low_table[j][i] = log10(log10_SFRD_z_low_table[j][i]); log10_SFRD_z_low_table[j][i] += 10.0; log10_SFRD_z_low_table[j][i] *= ln_10; } } for (i=0; i<NSFR_low; i++){ if(isfinite(log10_SFRD_z_low_table[j][i])==0) { LOG_ERROR("Detected either an infinite or NaN value in log10_SFRD_z_low_table"); Throw(ParameterError); } } #pragma omp parallel shared(SFRD_z_high_table,growthf,Mmin,Mmax,sigma2,overdense_high_table,MassTurnover,Alpha_star,Fstar10,Mlim_Fstar) private(i) num_threads(user_params_ps->N_THREADS) { #pragma omp for for(i=0;i<NSFR_high;i++) { SFRD_z_high_table[j][i] = Nion_ConditionalM(growthf[j],Mmin,Mmax,sigma2,Deltac,overdense_high_table[i],MassTurnover,Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES); SFRD_z_high_table[j][i] *= pow(10., 10.0); } } for(i=0;i<NSFR_high;i++) { if(isfinite(SFRD_z_high_table[j][i])==0) { LOG_ERROR("Detected either an infinite or NaN value in SFRD_z_high_table"); Throw(ParameterError); } } } } void initialise_SFRD_Conditional_table_MINI( int Nfilter, float min_density[], float max_density[], float growthf[], float R[], float Mcrit_atom[], float Alpha_star, float Fstar10, float Fstar7_MINI, bool FAST_FCOLL_TABLES ){ double overdense_val; double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION; double overdense_small_high, overdense_small_low; float Mmin,Mmax,Mlim_Fstar,sigma2,Mlim_Fstar_MINI; int i,j,k,i_tot; float ln_10; ln_10 = log(10); Mmin = global_params.M_MIN_INTEGRAL; Mmax = RtoM(R[Nfilter-1]); Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10); Mlim_Fstar_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar7_MINI * pow(1e3, Alpha_star)); float MassTurnover[NMTURN]; for (i=0;i<NMTURN;i++){ MassTurnover[i] = pow(10., LOG10_MTURN_MIN + (float)i/((float)NMTURN-1.)*(LOG10_MTURN_MAX-LOG10_MTURN_MIN)); } Mmin = log(Mmin); for (i=0; i<NSFR_high;i++) { overdense_high_table[i] = overdense_large_low + 
(float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low); } float MassBinLow; int MassBin; for (j=0; j < Nfilter; j++) { Mmax = RtoM(R[j]); initialiseGL_Nion_Xray(NGL_SFR, global_params.M_MIN_INTEGRAL, Mmax); Mmax = log(Mmax); MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; if(min_density[j]*growthf[j] < -1.) { overdense_small_low = -1. + global_params.MIN_DENSITY_LOW_LIMIT; } else { overdense_small_low = min_density[j]*growthf[j]; } overdense_small_high = max_density[j]*growthf[j]; if(overdense_small_high > global_params.CRIT_DENS_TRANSITION) { overdense_small_high = global_params.CRIT_DENS_TRANSITION; } for (i=0; i<NSFR_low; i++) { overdense_val = log10(1. + overdense_small_low) + (float)i/((float)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low)); overdense_low_table[i] = pow(10.,overdense_val); } #pragma omp parallel shared(log10_SFRD_z_low_table,growthf,Mmax,sigma2,overdense_low_table,Mcrit_atom,Alpha_star,Fstar10,Mlim_Fstar,\ log10_SFRD_z_low_table_MINI,MassTurnover,Fstar7_MINI,Mlim_Fstar_MINI,ln_10) \ private(i,k) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<NSFR_low; i++){ log10_SFRD_z_low_table[j][i] = log10(GaussLegendreQuad_Nion(1,NGL_SFR,growthf[j],Mmax,sigma2,Deltac,overdense_low_table[i]-1.,Mcrit_atom[j],Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES)); if(log10_SFRD_z_low_table[j][i] < -50.){ log10_SFRD_z_low_table[j][i] = -50.; } log10_SFRD_z_low_table[j][i] += 10.0; log10_SFRD_z_low_table[j][i] *= ln_10; for (k=0; k<NMTURN; k++){ log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] = log10(GaussLegendreQuad_Nion_MINI(1,NGL_SFR,growthf[j],Mmax,sigma2,Deltac,overdense_low_table[i]-1.,MassTurnover[k], Mcrit_atom[j],Alpha_star,0.,Fstar7_MINI,1.,Mlim_Fstar_MINI, 0., FAST_FCOLL_TABLES)); if(log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] < -50.){ log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] = -50.; } log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] += 10.0; log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] *= ln_10; } } } for (i=0; i<NSFR_low; i++){ if(isfinite(log10_SFRD_z_low_table[j][i])==0) { LOG_ERROR("Detected either an infinite or NaN value in log10_SFRD_z_low_table"); Throw(ParameterError); } for (k=0; k<NMTURN; k++){ if(isfinite(log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low])==0) { LOG_ERROR("Detected either an infinite or NaN value in log10_SFRD_z_low_table_MINI"); Throw(ParameterError); } } } #pragma omp parallel shared(SFRD_z_high_table,growthf,Mmin,Mmax,sigma2,overdense_high_table,Mcrit_atom,Alpha_star,Fstar10,\ Mlim_Fstar,SFRD_z_high_table_MINI,MassTurnover,Fstar7_MINI,Mlim_Fstar_MINI) \ private(i,k) num_threads(user_params_ps->N_THREADS) { #pragma omp for for(i=0;i<NSFR_high;i++) { SFRD_z_high_table[j][i] = Nion_ConditionalM(growthf[j],Mmin,Mmax,sigma2,Deltac,overdense_high_table[i],\ Mcrit_atom[j],Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES); if (SFRD_z_high_table[j][i] < 1e-50){ SFRD_z_high_table[j][i] = 1e-50; } SFRD_z_high_table[j][i] *= pow(10., 10.0); for (k=0; k<NMTURN; k++){ SFRD_z_high_table_MINI[j][i+k*NSFR_high] = Nion_ConditionalM_MINI(growthf[j],Mmin,Mmax,sigma2,Deltac,\ overdense_high_table[i],MassTurnover[k],Mcrit_atom[j],\ Alpha_star,0.,Fstar7_MINI,1.,Mlim_Fstar_MINI, 0., FAST_FCOLL_TABLES); if (SFRD_z_high_table_MINI[j][i+k*NSFR_high] < 1e-50){ 
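                        // clamp tiny or negative integrator output so the minihalo SFRD table stays strictly positive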
SFRD_z_high_table_MINI[j][i+k*NSFR_high] = 1e-50;
                    }
                }
            }
        }

        for(i=0;i<NSFR_high;i++) {
            if(isfinite(SFRD_z_high_table[j][i])==0) {
                LOG_ERROR("Detected either an infinite or NaN value in SFRD_z_high_table");
                Throw(ParameterError);
            }

            for (k=0; k<NMTURN; k++){
                if(isfinite(SFRD_z_high_table_MINI[j][i+k*NSFR_high])==0) {
                    LOG_ERROR("Detected either an infinite or NaN value in SFRD_z_high_table_MINI");
                    Throw(ParameterError);
                }
            }
        }
    }
}

// Compute the volume filling factor at a given redshift, Q(z), or find the redshift at a given Q, z(Q).
//
// The evolution of Q can be written as
//     dQ/dt = n_{ion}/dt - Q/t_{rec},
// where n_{ion} is the number of ionizing photons per baryon. The averaged recombination time is given by
//     t_{rec} ~ 0.93 Gyr * (C_{HII}/3)^-1 * (T_0/2e4 K)^0.7 * ((1+z)/7)^-3.
// We assume a clumping factor of C_{HII}=3 and an IGM temperature of T_0 = 2e4 K, following
// Section 2.1 of Kuhlen & Faucher-Giguère (2012) MNRAS, 423, 862 and references therein.
// 1) initialise interpolation table
//      -> initialise_Q_value_spline(NoRec, M_TURN, ALPHA_STAR, ALPHA_ESC, F_STAR10, F_ESC10)
//         NoRec = 0: Compute dQ/dt with the recombination time.
//         NoRec = 1: Ignore recombination.
// 2) find Q value at a given z -> Q_at_z(z, &(Q))
//    or find z at a given Q -> z_at_Q(Q, &(z)).
// 3) free memory allocation -> free_Q_value()

// Set up interpolation table for the volume filling factor, Q, at a given redshift z and redshift at a given Q.
int InitialisePhotonCons(struct UserParams *user_params, struct CosmoParams *cosmo_params, struct AstroParams *astro_params, struct FlagOptions *flag_options) {

    /* This is an API-level function for initialising the photon conservation. */

    int status;
    Try{  // this try wraps the whole function.

    Broadcast_struct_global_PS(user_params,cosmo_params);
    Broadcast_struct_global_UF(user_params,cosmo_params);

    init_ps();

    // To solve the differential equation, Euler's method is used.
    // NOTE:
    // (1) With the fiducial parameter set,
    //     when the Q value is < 0.9, the difference is less than 5% compared with the accurate calculation.
    //     When Q ~ 0.98, the difference is ~25%. To increase accuracy one can reduce the step size 'da', but it will increase computing time.
    // (2) With the fiducial parameter set,
    //     the difference in the redshift where reionization ends (Q = 1) is ~0.2% compared with the accurate calculation.
    float ION_EFF_FACTOR,M_MIN,M_MIN_z0,M_MIN_z1,Mlim_Fstar, Mlim_Fesc;
    double a_start = 0.03, a_end = 1./(1. + global_params.PhotonConsEndCalibz); // Scale factors of 0.03 and 0.17 correspond to redshifts of ~32 and ~5.0, respectively.
    double C_HII = 3., T_0 = 2e4;
    double reduce_ratio = 1.003;
    double Q0,Q1,Nion0,Nion1,Trec,da,a,z0,z1,zi,dadt,ans,delta_a,zi_prev,Q1_prev;
    double *z_arr,*Q_arr;
    int Nmax = 2000; // This is the number of steps, enough for 'da = 2e-3'. If 'da' is reduced, this number should be checked.
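    // Discretised form of the update evaluated in the loop below (this restates the scheme
    // already used by the code, written in terms of the scale factor a, with a centred finite
    // difference of half-width delta_a for the ionizing source term):
    //
    //     dQ/da ~= [ n_ion(z(a+delta_a)) - n_ion(z(a-delta_a)) ] / (2*delta_a)  -  Q / ( t_rec * da/dt )
    //     Q_{i+1} = Q_i + (dQ/da) * da
    //
    // with da/dt = Ho * a * sqrt( OMm/a^3 + OMr/a^4 + OMl ). When global_params.RecombPhotonCons
    // is off, the Q/t_rec sink term is dropped.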
int cnt, nbin, i, istart; int fail_condition, not_mono_increasing, num_fails; int gsl_status; z_arr = calloc(Nmax,sizeof(double)); Q_arr = calloc(Nmax,sizeof(double)); //set the minimum source mass if (flag_options->USE_MASS_DEPENDENT_ZETA) { ION_EFF_FACTOR = global_params.Pop2_ion * astro_params->F_STAR10 * astro_params->F_ESC10; M_MIN = astro_params->M_TURN/50.; Mlim_Fstar = Mass_limit_bisection(M_MIN, global_params.M_MAX_INTEGRAL, astro_params->ALPHA_STAR, astro_params->F_STAR10); Mlim_Fesc = Mass_limit_bisection(M_MIN, global_params.M_MAX_INTEGRAL, astro_params->ALPHA_ESC, astro_params->F_ESC10); if(user_params->FAST_FCOLL_TABLES){ initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN),1e20); } else{ initialiseSigmaMInterpTable(M_MIN,1e20); } } else { ION_EFF_FACTOR = astro_params->HII_EFF_FACTOR; } fail_condition = 1; num_fails = 0; // We are going to come up with the analytic curve for the photon non conservation correction // This can be somewhat numerically unstable and as such we increase the sampling until it works // If it fails to produce a monotonically increasing curve (for Q as a function of z) after 10 attempts we crash out while(fail_condition!=0) { a = a_start; if(num_fails < 3) { da = 3e-3 - ((double)num_fails)*(1e-3); } else { da = 1e-3 - ((double)num_fails - 2.)*(1e-4); } delta_a = 1e-7; zi_prev = Q1_prev = 0.; not_mono_increasing = 0; if(num_fails>0) { for(i=0;i<Nmax;i++) { z_arr[i] = 0.; Q_arr[i] = 0.; } } cnt = 0; Q0 = 0.; while (a < a_end) { zi = 1./a - 1.; z0 = 1./(a+delta_a) - 1.; z1 = 1./(a-delta_a) - 1.; // Ionizing emissivity (num of photons per baryon) if (flag_options->USE_MASS_DEPENDENT_ZETA) { Nion0 = ION_EFF_FACTOR*Nion_General(z0, astro_params->M_TURN/50., astro_params->M_TURN, astro_params->ALPHA_STAR, astro_params->ALPHA_ESC, astro_params->F_STAR10, astro_params->F_ESC10, Mlim_Fstar, Mlim_Fesc); Nion1 = ION_EFF_FACTOR*Nion_General(z1, astro_params->M_TURN/50., astro_params->M_TURN, astro_params->ALPHA_STAR, astro_params->ALPHA_ESC, astro_params->F_STAR10, astro_params->F_ESC10, Mlim_Fstar, Mlim_Fesc); } else { //set the minimum source mass if (astro_params->ION_Tvir_MIN < 9.99999e3) { // neutral IGM M_MIN_z0 = (float)TtoM(z0, astro_params->ION_Tvir_MIN, 1.22); M_MIN_z1 = (float)TtoM(z1, astro_params->ION_Tvir_MIN, 1.22); } else { // ionized IGM M_MIN_z0 = (float)TtoM(z0, astro_params->ION_Tvir_MIN, 0.6); M_MIN_z1 = (float)TtoM(z1, astro_params->ION_Tvir_MIN, 0.6); } if(M_MIN_z0 < M_MIN_z1) { if(user_params->FAST_FCOLL_TABLES){ initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN_z0),1e20); } else{ initialiseSigmaMInterpTable(M_MIN_z0,1e20); } } else { if(user_params->FAST_FCOLL_TABLES){ initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN_z1),1e20); } else{ initialiseSigmaMInterpTable(M_MIN_z1,1e20); } } Nion0 = ION_EFF_FACTOR*FgtrM_General(z0,M_MIN_z0); Nion1 = ION_EFF_FACTOR*FgtrM_General(z1,M_MIN_z1); freeSigmaMInterpTable(); } // With scale factor a, the above equation is written as dQ/da = n_{ion}/da - Q/t_{rec}*(dt/da) if (!global_params.RecombPhotonCons) { Q1 = Q0 + ((Nion0-Nion1)/2/delta_a)*da; // No Recombination } else { dadt = Ho*sqrt(cosmo_params_ps->OMm/a + global_params.OMr/a/a + cosmo_params_ps->OMl*a*a); // da/dt = Ho*a*sqrt(OMm/a^3 + OMr/a^4 + OMl) Trec = 0.93 * 1e9 * SperYR * pow(C_HII/3.,-1) * pow(T_0/2e4,0.7) * pow((1.+zi)/7.,-3); Q1 = Q0 + ((Nion0-Nion1)/2./delta_a - Q0/Trec/dadt)*da; } // Curve is no longer monotonically increasing, we are going to have to exit and start again if(Q1 < Q1_prev) { not_mono_increasing = 1; break; } zi_prev = zi; 
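            // carry this step's filling factor forward so that the next pass of the loop can
            // detect a non-monotonic drop in Q via the Q1 < Q1_prev check above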
Q1_prev = Q1; z_arr[cnt] = zi; Q_arr[cnt] = Q1; cnt = cnt + 1; if (Q1 >= 1.0) break; // if fully ionized, stop here. // As the Q value increases, the bin size decreases gradually because more accurate calculation is required. if (da < 7e-5) da = 7e-5; // set minimum bin size. else da = pow(da,reduce_ratio); Q0 = Q1; a = a + da; } // A check to see if we ended up with a monotonically increasing function if(not_mono_increasing==0) { fail_condition = 0; } else { num_fails += 1; if(num_fails>10) { LOG_ERROR("Failed too many times."); Throw ParameterError; } } } cnt = cnt - 1; istart = 0; for (i=1;i<cnt;i++){ if (Q_arr[i-1] == 0. && Q_arr[i] != 0.) istart = i-1; } nbin = cnt - istart; N_analytic = nbin; // initialise interploation Q as a function of z z_Q = calloc(nbin,sizeof(double)); Q_value = calloc(nbin,sizeof(double)); Q_at_z_spline_acc = gsl_interp_accel_alloc (); Q_at_z_spline = gsl_spline_alloc (gsl_interp_cspline, nbin); for (i=0; i<nbin; i++){ z_Q[i] = z_arr[cnt-i]; Q_value[i] = Q_arr[cnt-i]; } gsl_set_error_handler_off(); gsl_status = gsl_spline_init(Q_at_z_spline, z_Q, Q_value, nbin); GSL_ERROR(gsl_status); Zmin = z_Q[0]; Zmax = z_Q[nbin-1]; Qmin = Q_value[nbin-1]; Qmax = Q_value[0]; // initialise interpolation z as a function of Q double *Q_z = calloc(nbin,sizeof(double)); double *z_value = calloc(nbin,sizeof(double)); z_at_Q_spline_acc = gsl_interp_accel_alloc (); z_at_Q_spline = gsl_spline_alloc (gsl_interp_linear, nbin); for (i=0; i<nbin; i++){ Q_z[i] = Q_value[nbin-1-i]; z_value[i] = z_Q[nbin-1-i]; } gsl_status = gsl_spline_init(z_at_Q_spline, Q_z, z_value, nbin); GSL_ERROR(gsl_status); free(z_arr); free(Q_arr); if (flag_options->USE_MASS_DEPENDENT_ZETA) { freeSigmaMInterpTable; } LOG_DEBUG("Initialised PhotonCons."); } // End of try Catch(status){ return status; } return(0); } // Function to construct the spline for the calibration curve of the photon non-conservation int PhotonCons_Calibration(double *z_estimate, double *xH_estimate, int NSpline){ int status; Try{ if(xH_estimate[NSpline-1] > 0.0 && xH_estimate[NSpline-2] > 0.0 && xH_estimate[NSpline-3] > 0.0 && xH_estimate[0] <= global_params.PhotonConsStart) { initialise_NFHistory_spline(z_estimate,xH_estimate,NSpline); } } Catch(status){ return status; } return(0); } // Function callable from Python to know at which redshift to start sampling the calibration curve (to minimise function calls) int ComputeZstart_PhotonCons(double *zstart) { int status; double temp; Try{ if((1.-global_params.PhotonConsStart) > Qmax) { // It is possible that reionisation never even starts // Just need to arbitrarily set a high redshift to perform the algorithm temp = 20.; } else { z_at_Q(1. - global_params.PhotonConsStart,&(temp)); // Multiply the result by 10 per-cent to fix instances when this isn't high enough temp *= 1.1; } } Catch(status){ return(status); // Use the status to determine if something went wrong. 
} *zstart = temp; return(0); } void determine_deltaz_for_photoncons() { int i, j, increasing_val, counter, smoothing_int; double temp; float z_cal, z_analytic, NF_sample, returned_value, NF_sample_min, gradient_analytic, z_analytic_at_endpoint, const_offset, z_analytic_2, smoothing_width; float bin_width, delta_NF, val1, val2, extrapolated_value; LOG_DEBUG("Determining deltaz for photon cons."); // Number of points for determine the delta z correction of the photon non-conservation N_NFsamples = 100; // Determine the change in neutral fraction to calculate the gradient for the linear extrapolation of the photon non-conservation correction delta_NF = 0.025; // A width (in neutral fraction data points) in which point we average over to try and avoid sharp features in the correction (removes some kinks) // Effectively acts as filtering step smoothing_width = 35.; // The photon non-conservation correction has a threshold (in terms of neutral fraction; global_params.PhotonConsEnd) for which we switch // from using the exact correction between the calibrated (21cmFAST all flag options off) to analytic expression to some extrapolation. // This threshold is required due to the behaviour of 21cmFAST at very low neutral fractions, which cause extreme behaviour with recombinations on // A lot of the steps and choices are not completely rubust, just chosed to smooth/average the data to have smoother resultant reionisation histories // Determine the number of extrapolated points required, if required at all. if(calibrated_NF_min < global_params.PhotonConsEnd) { // We require extrapolation, set minimum point to the threshold, and extrapolate beyond. NF_sample_min = global_params.PhotonConsEnd; // Determine the number of extrapolation points (to better smooth the correction) between the threshod (global_params.PhotonConsEnd) and a // point close to zero neutral fraction (set by global_params.PhotonConsAsymptoteTo) // Choice is to get the delta neutral fraction between extrapolated points to be similar to the cadence in the exact correction if(calibrated_NF_min > global_params.PhotonConsAsymptoteTo) { N_extrapolated = ((float)N_NFsamples - 1.)*(NF_sample_min - calibrated_NF_min)/( global_params.PhotonConsStart - NF_sample_min ); } else { N_extrapolated = ((float)N_NFsamples - 1.)*(NF_sample_min - global_params.PhotonConsAsymptoteTo)/( global_params.PhotonConsStart - NF_sample_min ); } N_extrapolated = (int)floor( N_extrapolated ) - 1; // Minus one as the zero point is added below } else { // No extrapolation required, neutral fraction never reaches zero NF_sample_min = calibrated_NF_min; N_extrapolated = 0; } // Determine the bin width for the sampling of the neutral fraction for the correction bin_width = ( global_params.PhotonConsStart - NF_sample_min )/((float)N_NFsamples - 1.); // allocate memory for arrays required to determine the photon non-conservation correction deltaz = calloc(N_NFsamples + N_extrapolated + 1,sizeof(double)); deltaz_smoothed = calloc(N_NFsamples + N_extrapolated + 1,sizeof(double)); NeutralFractions = calloc(N_NFsamples + N_extrapolated + 1,sizeof(double)); // Go through and fill the data points (neutral fraction and corresponding delta z between the calibrated and analytic curves). for(i=0;i<N_NFsamples;i++) { NF_sample = NF_sample_min + bin_width*(float)i; // Determine redshift given a neutral fraction for the calibration curve z_at_NFHist(NF_sample,&(temp)); z_cal = temp; // Determine redshift given a neutral fraction for the analytic curve z_at_Q(1. 
- NF_sample,&(temp)); z_analytic = temp; deltaz[i+1+N_extrapolated] = fabs( z_cal - z_analytic ); NeutralFractions[i+1+N_extrapolated] = NF_sample; } // Determining the end-point (lowest neutral fraction) for the photon non-conservation correction if(calibrated_NF_min >= global_params.PhotonConsEnd) { increasing_val = 0; counter = 0; // Check if all the values of delta z are increasing for(i=0;i<(N_NFsamples-1);i++) { if(deltaz[i+1+N_extrapolated] >= deltaz[i+N_extrapolated]) { counter += 1; } } // If all the values of delta z are increasing, then some of the smoothing of the correction done below cannot be performed if(counter==(N_NFsamples-1)) { increasing_val = 1; } // Since we never have reionisation, need to set an appropriate end-point for the correction // Take some fraction of the previous point to determine the end-point NeutralFractions[0] = 0.999*NF_sample_min; if(increasing_val) { // Values of delta z are always increasing with decreasing neutral fraction thus make the last point slightly larger deltaz[0] = 1.001*deltaz[1]; } else { // Values of delta z are always decreasing with decreasing neutral fraction thus make the last point slightly smaller deltaz[0] = 0.999*deltaz[1]; } } else { // Ok, we are going to be extrapolating the photon non-conservation (delta z) beyond the threshold // Construct a linear curve for the analytic function to extrapolate to the new endpoint // The choice for doing so is to ensure the corrected reionisation history is mostly smooth, and doesn't // artificially result in kinks due to switching between how the delta z should be calculated z_at_Q(1. - (NeutralFractions[1+N_extrapolated] + delta_NF),&(temp)); z_analytic = temp; z_at_Q(1. - NeutralFractions[1+N_extrapolated],&(temp)); z_analytic_2 = temp; // determine the linear curve // Multiplitcation by 1.1 is arbitrary but effectively smooths out most kinks observed in the resultant corrected reionisation histories gradient_analytic = 1.1*( delta_NF )/( z_analytic - z_analytic_2 ); const_offset = ( NeutralFractions[1+N_extrapolated] + delta_NF ) - gradient_analytic * z_analytic; // determine the extrapolation end point if(calibrated_NF_min > global_params.PhotonConsAsymptoteTo) { extrapolated_value = calibrated_NF_min; } else { extrapolated_value = global_params.PhotonConsAsymptoteTo; } // calculate the delta z for the extrapolated end point z_at_NFHist(extrapolated_value,&(temp)); z_cal = temp; z_analytic_at_endpoint = ( extrapolated_value - const_offset )/gradient_analytic ; deltaz[0] = fabs( z_cal - z_analytic_at_endpoint ); NeutralFractions[0] = extrapolated_value; // If performing extrapolation, add in all the extrapolated points between the end-point and the threshold to end the correction (global_params.PhotonConsEnd) for(i=0;i<N_extrapolated;i++) { if(calibrated_NF_min > global_params.PhotonConsAsymptoteTo) { NeutralFractions[i+1] = calibrated_NF_min + (NF_sample_min - calibrated_NF_min)*(float)(i+1)/((float)N_extrapolated + 1.); } else { NeutralFractions[i+1] = global_params.PhotonConsAsymptoteTo + (NF_sample_min - global_params.PhotonConsAsymptoteTo)*(float)(i+1)/((float)N_extrapolated + 1.); } deltaz[i+1] = deltaz[0] + ( deltaz[1+N_extrapolated] - deltaz[0] )*(float)(i+1)/((float)N_extrapolated + 1.); } } // We have added the extrapolated values, now check if they are all increasing or not (again, to determine whether or not to try and smooth the corrected curve increasing_val = 0; counter = 0; for(i=0;i<(N_NFsamples-1);i++) { if(deltaz[i+1+N_extrapolated] >= deltaz[i+N_extrapolated]) { 
counter += 1; } } if(counter==(N_NFsamples-1)) { increasing_val = 1; } // For some models, the resultant delta z for extremely high neutral fractions ( > 0.95) seem to oscillate or sometimes drop in value. // This goes through and checks if this occurs, and tries to smooth this out // This doesn't occur very often, but can cause an artificial drop in the reionisation history (neutral fraction value) connecting the // values before/after the photon non-conservation correction starts. for(i=0;i<(N_NFsamples+N_extrapolated);i++) { val1 = deltaz[i]; val2 = deltaz[i+1]; counter = 0; // Check if we have a neutral fraction above 0.95, that the values are decreasing (val2 < val1), that we haven't sampled too many points (counter) // and that the NF_sample_min is less than around 0.8. That is, if a reasonable fraction of the reionisation history is sampled. while( NeutralFractions[i+1] > 0.95 && val2 < val1 && NF_sample_min < 0.8 && counter < 100) { NF_sample = global_params.PhotonConsStart - 0.001*(counter+1); // Determine redshift given a neutral fraction for the calibration curve z_at_NFHist(NF_sample,&(temp)); z_cal = temp; // Determine redshift given a neutral fraction for the analytic curve z_at_Q(1. - NF_sample,&(temp)); z_analytic = temp; // Determine the delta z val2 = fabs( z_cal - z_analytic ); deltaz[i+1] = val2; counter += 1; // If after 100 samplings we couldn't get the value to increase (like it should), just modify it from the previous point. if(counter==100) { deltaz[i+1] = deltaz[i] * 1.01; } } } // Store the data in its intermediate state before averaging for(i=0;i<(N_NFsamples+N_extrapolated+1);i++) { deltaz_smoothed[i] = deltaz[i]; } // If we are not increasing for all values, we can smooth out some features in delta z when connecting the extrapolated delta z values // compared to those from the exact correction (i.e. when we cross the threshold). if(!increasing_val) { for(i=0;i<(N_NFsamples+N_extrapolated);i++) { val1 = deltaz[0]; val2 = deltaz[i+1]; counter = 0; // Try and find a point which can be used to smooth out any dip in delta z as a function of neutral fraction. // It can be flat, then drop, then increase. This smooths over this drop (removes a kink in the resultant reionisation history). // Choice of 75 is somewhat arbitrary while(val2 < val1 && (counter < 75 || (1+(i+1)+counter) > (N_NFsamples+N_extrapolated))) { counter += 1; val2 = deltaz[i+1+counter]; deltaz_smoothed[i+1] = ( val1 + deltaz[1+(i+1)+counter] )/2.; } if(counter==75 || (1+(i+1)+counter) > (N_NFsamples+N_extrapolated)) { deltaz_smoothed[i+1] = deltaz[i+1]; } } } // Here we effectively filter over the delta z as a function of neutral fraction to try and minimise any possible kinks etc. in the functional curve. for(i=0;i<(N_NFsamples+N_extrapolated+1);i++) { // We are at the end-points, cannot smooth if(i==0 || i==(N_NFsamples+N_extrapolated)) { deltaz[i] = deltaz_smoothed[i]; } else { deltaz[i] = 0.; // We are symmetrically smoothing, making sure we have the same number of data points either side of the point we are filtering over // This determins the filter width when close to the edge of the data ranges if( (i - (int)floor(smoothing_width/2.) ) < 0) { smoothing_int = 2*( i ) + (int)((int)smoothing_width%2); } else if( (i - (int)floor(smoothing_width/2.) + ((int)smoothing_width - 1) ) > (N_NFsamples + N_extrapolated) ) { smoothing_int = ((int)smoothing_width - 1) - 2*((i - (int)floor(smoothing_width/2.) 
+ ((int)smoothing_width - 1) ) - (N_NFsamples + N_extrapolated) ) + (int)((int)smoothing_width%2); } else { smoothing_int = (int)smoothing_width; } // Average (filter) over the delta z values to smooth the result counter = 0; for(j=0;j<(int)smoothing_width;j++) { if(((i - (int)floor((float)smoothing_int/2.) + j)>=0) && ((i - (int)floor((float)smoothing_int/2.) + j) <= (N_NFsamples + N_extrapolated + 1)) && counter < smoothing_int ) { deltaz[i] += deltaz_smoothed[i - (int)floor((float)smoothing_int/2.) + j]; counter += 1; } } deltaz[i] /= (float)counter; } } N_deltaz = N_NFsamples + N_extrapolated + 1; // Now, we can construct the spline of the photon non-conservation correction (delta z as a function of neutral fraction) deltaz_spline_for_photoncons_acc = gsl_interp_accel_alloc (); deltaz_spline_for_photoncons = gsl_spline_alloc (gsl_interp_linear, N_NFsamples + N_extrapolated + 1); gsl_set_error_handler_off(); int gsl_status; gsl_status = gsl_spline_init(deltaz_spline_for_photoncons, NeutralFractions, deltaz, N_NFsamples + N_extrapolated + 1); GSL_ERROR(gsl_status); } float adjust_redshifts_for_photoncons( struct AstroParams *astro_params, struct FlagOptions *flag_options, float *redshift, float *stored_redshift, float *absolute_delta_z ) { int i, new_counter; double temp; float required_NF, adjusted_redshift, future_z, gradient_extrapolation, const_extrapolation, temp_redshift, check_required_NF; LOG_DEBUG("Adjusting redshifts for photon cons."); if(*redshift < global_params.PhotonConsEndCalibz) { LOG_ERROR( "You have passed a redshift (z = %f) that is lower than the enpoint of the photon non-conservation correction "\ "(global_params.PhotonConsEndCalibz = %f). If this behaviour is desired then set global_params.PhotonConsEndCalibz "\ "to a value lower than z = %f.",*redshift,global_params.PhotonConsEndCalibz,*redshift ); Throw(ParameterError); } // Determine the neutral fraction (filling factor) of the analytic calibration expression given the current sampled redshift Q_at_z(*redshift, &(temp)); required_NF = 1.0 - (float)temp; // Find which redshift we need to sample in order for the calibration reionisation history to match the analytic expression if(required_NF > global_params.PhotonConsStart) { // We haven't started ionising yet, so keep redshifts the same adjusted_redshift = *redshift; *absolute_delta_z = 0.; } else if(required_NF<=global_params.PhotonConsEnd) { // We have gone beyond the threshold for the end of the photon non-conservation correction // Deemed to be roughly where the calibration curve starts to approach the analytic expression if(FirstNF_Estimate <= 0. 
&& required_NF <= 0.0) { // Reionisation has already happened well before the calibration adjusted_redshift = *redshift; } else { // We have crossed the NF threshold for the photon conservation correction so now set to the delta z at the threshold if(required_NF < global_params.PhotonConsAsymptoteTo) { // This counts the number of times we have exceeded the extrapolated point and attempts to modify the delta z // to try and make the function a little smoother *absolute_delta_z = gsl_spline_eval(deltaz_spline_for_photoncons, global_params.PhotonConsAsymptoteTo, deltaz_spline_for_photoncons_acc); new_counter = 0; temp_redshift = *redshift; check_required_NF = required_NF; // Ok, find when in the past we exceeded the asymptote threshold value using the global_params.ZPRIME_STEP_FACTOR // In doing it this way, co-eval boxes will be the same as lightcone boxes with regard to redshift sampling while( check_required_NF < global_params.PhotonConsAsymptoteTo ) { temp_redshift = ((1. + temp_redshift)*global_params.ZPRIME_STEP_FACTOR - 1.); Q_at_z(temp_redshift, &(temp)); check_required_NF = 1.0 - (float)temp; new_counter += 1; } // Now adjust the final delta_z by some amount to smooth if over successive steps if(deltaz[1] > deltaz[0]) { *absolute_delta_z = pow( 0.96 , (new_counter - 1) + 1. ) * ( *absolute_delta_z ); } else { *absolute_delta_z = pow( 1.04 , (new_counter - 1) + 1. ) * ( *absolute_delta_z ); } // Check if we go into the future (z < 0) and avoid it adjusted_redshift = (*redshift) - (*absolute_delta_z); if(adjusted_redshift < 0.0) { adjusted_redshift = 0.0; } } else { *absolute_delta_z = gsl_spline_eval(deltaz_spline_for_photoncons, required_NF, deltaz_spline_for_photoncons_acc); adjusted_redshift = (*redshift) - (*absolute_delta_z); } } } else { // Initialise the photon non-conservation correction curve if(!photon_cons_allocated) { determine_deltaz_for_photoncons(); photon_cons_allocated = true; } // We have exceeded even the end-point of the extrapolation // Just smooth ever subsequent point // Note that this is deliberately tailored to light-cone quantites, but will still work with co-eval cubes // Though might produce some very minor discrepancies when comparing outputs. if(required_NF < NeutralFractions[0]) { new_counter = 0; temp_redshift = *redshift; check_required_NF = required_NF; // Ok, find when in the past we exceeded the asymptote threshold value using the global_params.ZPRIME_STEP_FACTOR // In doing it this way, co-eval boxes will be the same as lightcone boxes with regard to redshift sampling while( check_required_NF < NeutralFractions[0] ) { temp_redshift = ((1. + temp_redshift)*global_params.ZPRIME_STEP_FACTOR - 1.); Q_at_z(temp_redshift, &(temp)); check_required_NF = 1.0 - (float)temp; new_counter += 1; } if(new_counter > 5) { LOG_WARNING( "The photon non-conservation correction has employed an extrapolation for\n"\ "more than 5 consecutive snapshots. This can be unstable, thus please check "\ "resultant history. Parameters are:\n" ); #if LOG_LEVEL >= LOG_WARNING writeAstroParams(flag_options, astro_params); #endif } // Now adjust the final delta_z by some amount to smooth if over successive steps if(deltaz[1] > deltaz[0]) { *absolute_delta_z = pow( 0.998 , (new_counter - 1) + 1. ) * ( *absolute_delta_z ); } else { *absolute_delta_z = pow( 1.002 , (new_counter - 1) + 1. 
) * ( *absolute_delta_z ); } // Check if we go into the future (z < 0) and avoid it adjusted_redshift = (*redshift) - (*absolute_delta_z); if(adjusted_redshift < 0.0) { adjusted_redshift = 0.0; } } else { // Find the corresponding redshift for the calibration curve given the required neutral fraction (filling factor) from the analytic expression *absolute_delta_z = gsl_spline_eval(deltaz_spline_for_photoncons, (double)required_NF, deltaz_spline_for_photoncons_acc); adjusted_redshift = (*redshift) - (*absolute_delta_z); } } // keep the original sampled redshift *stored_redshift = *redshift; // This redshift snapshot now uses the modified redshift following the photon non-conservation correction *redshift = adjusted_redshift; } void Q_at_z(double z, double *splined_value){ float returned_value; if (z >= Zmax) { *splined_value = 0.; } else if (z <= Zmin) { *splined_value = 1.; } else { returned_value = gsl_spline_eval(Q_at_z_spline, z, Q_at_z_spline_acc); *splined_value = returned_value; } } void z_at_Q(double Q, double *splined_value){ float returned_value; if (Q < Qmin) { LOG_ERROR("The minimum value of Q is %.4e",Qmin); Throw(ParameterError); } else if (Q > Qmax) { LOG_ERROR("The maximum value of Q is %.4e. Reionization ends at ~%.4f.",Qmax,Zmin); Throw(ParameterError); } else { returned_value = gsl_spline_eval(z_at_Q_spline, Q, z_at_Q_spline_acc); *splined_value = returned_value; } } void free_Q_value() { gsl_spline_free (Q_at_z_spline); gsl_interp_accel_free (Q_at_z_spline_acc); gsl_spline_free (z_at_Q_spline); gsl_interp_accel_free (z_at_Q_spline_acc); } void initialise_NFHistory_spline(double *redshifts, double *NF_estimate, int NSpline){ int i, counter, start_index, found_start_index; // This takes in the data for the calibration curve for the photon non-conservation correction counter = 0; start_index = 0; found_start_index = 0; FinalNF_Estimate = NF_estimate[0]; FirstNF_Estimate = NF_estimate[NSpline-1]; // Determine the point in the data where its no longer zero (basically to avoid too many zeros in the spline) for(i=0;i<NSpline-1;i++) { if(NF_estimate[i+1] > NF_estimate[i]) { if(found_start_index == 0) { start_index = i; found_start_index = 1; } } counter += 1; } counter = counter - start_index; N_calibrated = (counter+1); // Store the data points for determining the photon non-conservation correction nf_vals = calloc((counter+1),sizeof(double)); z_vals = calloc((counter+1),sizeof(double)); calibrated_NF_min = 1.; // Store the data, and determine the end point of the input data for estimating the extrapolated results for(i=0;i<(counter+1);i++) { nf_vals[i] = NF_estimate[start_index+i]; z_vals[i] = redshifts[start_index+i]; // At the extreme high redshift end, there can be numerical issues with the solution of the analytic expression if(i>0) { while(nf_vals[i] <= nf_vals[i-1]) { nf_vals[i] += 0.000001; } } if(nf_vals[i] < calibrated_NF_min) { calibrated_NF_min = nf_vals[i]; } } NFHistory_spline_acc = gsl_interp_accel_alloc (); // NFHistory_spline = gsl_spline_alloc (gsl_interp_cspline, (counter+1)); NFHistory_spline = gsl_spline_alloc (gsl_interp_linear, (counter+1)); gsl_set_error_handler_off(); int gsl_status; gsl_status = gsl_spline_init(NFHistory_spline, nf_vals, z_vals, (counter+1)); GSL_ERROR(gsl_status); z_NFHistory_spline_acc = gsl_interp_accel_alloc (); // z_NFHistory_spline = gsl_spline_alloc (gsl_interp_cspline, (counter+1)); z_NFHistory_spline = gsl_spline_alloc (gsl_interp_linear, (counter+1)); gsl_status = gsl_spline_init(z_NFHistory_spline, z_vals, nf_vals, 
(counter+1)); GSL_ERROR(gsl_status); } void z_at_NFHist(double xHI_Hist, double *splined_value){ float returned_value; returned_value = gsl_spline_eval(NFHistory_spline, xHI_Hist, NFHistory_spline_acc); *splined_value = returned_value; } void NFHist_at_z(double z, double *splined_value){ float returned_value; returned_value = gsl_spline_eval(z_NFHistory_spline, z, NFHistory_spline_acc); *splined_value = returned_value; } int ObtainPhotonConsData( double *z_at_Q_data, double *Q_data, int *Ndata_analytic, double *z_cal_data, double *nf_cal_data, int *Ndata_calibration, double *PhotonCons_NFdata, double *PhotonCons_deltaz, int *Ndata_PhotonCons) { int i; *Ndata_analytic = N_analytic; *Ndata_calibration = N_calibrated; *Ndata_PhotonCons = N_deltaz; for(i=0;i<N_analytic;i++) { z_at_Q_data[i] = z_Q[i]; Q_data[i] = Q_value[i]; } for(i=0;i<N_calibrated;i++) { z_cal_data[i] = z_vals[i]; nf_cal_data[i] = nf_vals[i]; } for(i=0;i<N_deltaz;i++) { PhotonCons_NFdata[i] = NeutralFractions[i]; PhotonCons_deltaz[i] = deltaz[i]; } return(0); } void FreePhotonConsMemory() { LOG_DEBUG("Freeing some photon cons memory."); free(deltaz); free(deltaz_smoothed); free(NeutralFractions); free(z_Q); free(Q_value); free(nf_vals); free(z_vals); free_Q_value(); gsl_spline_free (NFHistory_spline); gsl_interp_accel_free (NFHistory_spline_acc); gsl_spline_free (z_NFHistory_spline); gsl_interp_accel_free (z_NFHistory_spline_acc); gsl_spline_free (deltaz_spline_for_photoncons); gsl_interp_accel_free (deltaz_spline_for_photoncons_acc); LOG_DEBUG("Done Freeing photon cons memory."); photon_cons_allocated = false; } void FreeTsInterpolationTables(struct FlagOptions *flag_options) { LOG_DEBUG("Freeing some interpolation table memory."); freeSigmaMInterpTable(); if (flag_options->USE_MASS_DEPENDENT_ZETA) { free(z_val); z_val = NULL; free(Nion_z_val); free(z_X_val); z_X_val = NULL; free(SFRD_val); if (flag_options->USE_MINI_HALOS){ free(Nion_z_val_MINI); free(SFRD_val_MINI); } } else{ free(FgtrM_1DTable_linear); } LOG_DEBUG("Done Freeing interpolation table memory."); interpolation_tables_allocated = false; }
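// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): every spline built in
// this file follows the same GSL recipe, i.e. tabulate (x, y) with strictly
// increasing x, switch off the default GSL error handler, check the status
// returned by gsl_spline_init, and evaluate with gsl_spline_eval. The function
// name, the guard macro PHOTONCONS_SPLINE_DEMO and the numbers below are
// hypothetical and exist only for this sketch.
// ---------------------------------------------------------------------------
#ifdef PHOTONCONS_SPLINE_DEMO
#include <stdio.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_spline.h>

static int demo_linear_spline(void)
{
    // Tabulated neutral-fraction history: the abscissa must be strictly increasing,
    // which is why initialise_NFHistory_spline() nudges equal nf_vals upwards.
    double xH[5] = {0.05, 0.30, 0.55, 0.80, 0.95};
    double z[5]  = {6.0,  7.0,  8.0,  9.5, 11.0};

    gsl_interp_accel *acc = gsl_interp_accel_alloc();
    gsl_spline *spl = gsl_spline_alloc(gsl_interp_linear, 5);

    gsl_set_error_handler_off();                  // report errors via status codes, not aborts
    int status = gsl_spline_init(spl, xH, z, 5);
    if (status) {
        fprintf(stderr, "gsl_spline_init failed: %s\n", gsl_strerror(status));
        gsl_spline_free(spl);
        gsl_interp_accel_free(acc);
        return status;
    }

    // Interpolate the redshift at which the neutral fraction reaches 0.5.
    printf("z(xH = 0.5) ~ %f\n", gsl_spline_eval(spl, 0.5, acc));

    gsl_spline_free(spl);
    gsl_interp_accel_free(acc);
    return 0;
}
#endif // PHOTONCONS_SPLINE_DEMO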
bspline_create.c
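// ---------------------------------------------------------------------------
// Illustrative usage sketch for the creation routines defined in this file
// (not part of the original einspline source; kept inside #if 0 so it is never
// compiled). It relies only on names that appear below: Ugrid, BCtype_s, the
// NATURAL boundary code, create_UBspline_1d_s() and recompute_UBspline_1d_s().
// The helper name example_1d_spline is hypothetical; evaluation routines live
// elsewhere in einspline and are not shown in this file, so none are called here.
// ---------------------------------------------------------------------------
#if 0
#include <stdlib.h>
#include "bspline_create.h"

static void example_1d_spline(const float data[], const float new_data[], int n)
{
    // Uniform grid on [0, 1] with n samples.
    Ugrid x_grid;
    x_grid.start = 0.0;
    x_grid.end   = 1.0;
    x_grid.num   = n;

    // Natural boundary conditions (second derivative forced to zero at both ends;
    // lVal/rVal are overwritten with 0 internally for NATURAL).
    BCtype_s bc;
    bc.lCode = NATURAL;  bc.lVal = 0.0;
    bc.rCode = NATURAL;  bc.rVal = 0.0;

    // Solve for the interpolating coefficients; for non-periodic boundary
    // conditions the spline stores num+2 coefficients internally.
    UBspline_1d_s *spline = create_UBspline_1d_s(x_grid, bc, (float *)data);

    // Refit the same spline object to a new data set on the same grid.
    recompute_UBspline_1d_s(spline, (float *)new_data);

    // Teardown: the coefficient array and the spline struct are separate allocations.
    free(spline->coefs);
    free(spline);
}
#endif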
///////////////////////////////////////////////////////////////////////////// // einspline: a library for creating and evaluating B-splines // // Copyright (C) 2007 Kenneth P. Esler, Jr. // // Released under the BSD-3-clause license // ///////////////////////////////////////////////////////////////////////////// #include "bspline_create.h" #ifndef _XOPEN_SOURCE #define _XOPEN_SOURCE 600 #endif #ifndef __USE_XOPEN2K #define __USE_XOPEN2K #endif #include <stdlib.h> #include <stdio.h> #include <inttypes.h> int posix_memalign(void **memptr, size_t alignment, size_t size); //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //// Helper functions for spline creation //// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// void init_sse_data(); void find_coefs_1d_d (Ugrid grid, BCtype_d bc, double *data, intptr_t dstride, double *coefs, intptr_t cstride); void solve_deriv_interp_1d_s (float bands[], float coefs[], int M, int cstride) { // Solve interpolating equations // First and last rows are different bands[4*(0)+1] /= bands[4*(0)+0]; bands[4*(0)+2] /= bands[4*(0)+0]; bands[4*(0)+3] /= bands[4*(0)+0]; bands[4*(0)+0] = 1.0; bands[4*(1)+1] -= bands[4*(1)+0]*bands[4*(0)+1]; bands[4*(1)+2] -= bands[4*(1)+0]*bands[4*(0)+2]; bands[4*(1)+3] -= bands[4*(1)+0]*bands[4*(0)+3]; bands[4*(0)+0] = 0.0; bands[4*(1)+2] /= bands[4*(1)+1]; bands[4*(1)+3] /= bands[4*(1)+1]; bands[4*(1)+1] = 1.0; // Now do rows 2 through M+1 for (int row=2; row < (M+1); row++) { bands[4*(row)+1] -= bands[4*(row)+0]*bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0]*bands[4*(row-1)+3]; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; bands[4*(row)+0] = 0.0; bands[4*(row)+1] = 1.0; } // Do last row bands[4*(M+1)+1] -= bands[4*(M+1)+0]*bands[4*(M-1)+2]; bands[4*(M+1)+3] -= bands[4*(M+1)+0]*bands[4*(M-1)+3]; bands[4*(M+1)+2] -= bands[4*(M+1)+1]*bands[4*(M)+2]; bands[4*(M+1)+3] -= bands[4*(M+1)+1]*bands[4*(M)+3]; bands[4*(M+1)+3] /= bands[4*(M+1)+2]; bands[4*(M+1)+2] = 1.0; coefs[(M+1)*cstride] = bands[4*(M+1)+3]; // Now back substitute up for (int row=M; row>0; row--) coefs[row*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[cstride*(row+1)]; // Finish with first row coefs[0] = bands[4*(0)+3] - bands[4*(0)+1]*coefs[1*cstride] - bands[4*(0)+2]*coefs[2*cstride]; } // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. 
// On exit, coefs with contain interpolating B-spline coefs void solve_periodic_interp_1d_s (float bands[], float coefs[], float lastCol[], int M, size_t cstride) { // Now solve: // First and last rows are different bands[4*(0)+2] /= bands[4*(0)+1]; bands[4*(0)+0] /= bands[4*(0)+1]; bands[4*(0)+3] /= bands[4*(0)+1]; bands[4*(0)+1] = 1.0; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0]; bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2]; lastCol[0] = bands[4*(0)+0]; for (int row=1; row < (M-1); row++) { bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3]; lastCol[row] = -bands[4*(row)+0] * lastCol[row-1]; bands[4*(row)+0] = 0.0; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; lastCol[row] /= bands[4*(row)+1]; bands[4*(row)+1] = 1.0; if (row < (M-2)) { bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3]; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2]; } } // Now do last row // The [2] element and [0] element are now on top of each other bands[4*(M-1)+0] += bands[4*(M-1)+2]; bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]); bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3]; bands[4*(M-1)+3] /= bands[4*(M-1)+1]; coefs[M*cstride] = bands[4*(M-1)+3]; for (int row=M-2; row>=0; row--) coefs[(row+1)*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride]; coefs[0*cstride] = coefs[M*cstride]; coefs[(M+1)*cstride] = coefs[1*cstride]; coefs[(M+2)*cstride] = coefs[2*cstride]; } // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. 
// On exit, coefs with contain interpolating B-spline coefs void solve_antiperiodic_interp_1d_s (float bands[], float coefs[], float lastCol[], int M, size_t cstride) { bands[4*0+0] *= -1.0; bands[4*(M-1)+2] *= -1.0; // Now solve: // First and last rows are different bands[4*(0)+2] /= bands[4*(0)+1]; bands[4*(0)+0] /= bands[4*(0)+1]; bands[4*(0)+3] /= bands[4*(0)+1]; bands[4*(0)+1] = 1.0; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0]; bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2]; lastCol[0] = bands[4*(0)+0]; for (int row=1; row < (M-1); row++) { bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3]; lastCol[row] = -bands[4*(row)+0] * lastCol[row-1]; bands[4*(row)+0] = 0.0; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; lastCol[row] /= bands[4*(row)+1]; bands[4*(row)+1] = 1.0; if (row < (M-2)) { bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3]; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2]; } } // Now do last row // The [2] element and [0] element are now on top of each other bands[4*(M-1)+0] += bands[4*(M-1)+2]; bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]); bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3]; bands[4*(M-1)+3] /= bands[4*(M-1)+1]; coefs[M*cstride] = bands[4*(M-1)+3]; for (int row=M-2; row>=0; row--) coefs[(row+1)*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride]; coefs[0*cstride] = -coefs[M*cstride]; coefs[(M+1)*cstride] = -coefs[1*cstride]; coefs[(M+2)*cstride] = -coefs[2*cstride]; } #ifdef HIGH_PRECISION void find_coefs_1d_s (Ugrid grid, BCtype_s bc, float *data, intptr_t dstride, float *coefs, intptr_t cstride) { BCtype_d d_bc; double *d_data, *d_coefs; d_bc.lCode = bc.lCode; d_bc.rCode = bc.rCode; d_bc.lVal = bc.lVal; d_bc.rVal = bc.rVal; int M = grid.num, N; if (bc.lCode == PERIODIC || bc.lCode == ANTIPERIODIC) N = M+3; else N = M+2; d_data = malloc (N*sizeof(double)); d_coefs = malloc (N*sizeof(double)); for (int i=0; i<M; i++) d_data[i] = data[i*dstride]; find_coefs_1d_d (grid, d_bc, d_data, 1, d_coefs, 1); for (int i=0; i<N; i++) coefs[i*cstride] = d_coefs[i]; free (d_data); free (d_coefs); } #else void find_coefs_1d_s (Ugrid grid, BCtype_s bc, float *data, intptr_t dstride, float *coefs, intptr_t cstride) { size_t M = grid.num; float basis[4] = {1.0/6.0, 2.0/3.0, 1.0/6.0, 0.0}; if (bc.lCode == PERIODIC || bc.lCode == ANTIPERIODIC) { float* bands =(float*) malloc(4*M*sizeof(float)); float* lastCol =(float*) malloc(M*sizeof(float)); for (size_t i=0; i<M; i++) { bands[4*i+0] = basis[0]; bands[4*i+1] = basis[1]; bands[4*i+2] = basis[2]; bands[4*i+3] = data[i*dstride]; } if (bc.lCode == PERIODIC) solve_periodic_interp_1d_s (bands, coefs, lastCol, M, cstride); else solve_antiperiodic_interp_1d_s (bands, coefs, lastCol, M, cstride); free (bands); free (lastCol); } else { // Setup boundary conditions float abcd_left[4], abcd_right[4]; // Left boundary if (bc.lCode == FLAT || bc.lCode == NATURAL) bc.lVal = 0.0; if (bc.lCode == FLAT || bc.lCode == DERIV1) { abcd_left[0] = -0.5 * grid.delta_inv; abcd_left[1] = 0.0 * grid.delta_inv; abcd_left[2] = 0.5 * grid.delta_inv; abcd_left[3] = bc.lVal; } if (bc.lCode == NATURAL || bc.lCode == DERIV2) { abcd_left[0] = 1.0 * grid.delta_inv * grid.delta_inv; abcd_left[1] =-2.0 * grid.delta_inv * grid.delta_inv; abcd_left[2] = 1.0 * 
grid.delta_inv * grid.delta_inv; abcd_left[3] = bc.lVal; } // Right boundary if (bc.rCode == FLAT || bc.rCode == NATURAL) bc.rVal = 0.0; if (bc.rCode == FLAT || bc.rCode == DERIV1) { abcd_right[0] = -0.5 * grid.delta_inv; abcd_right[1] = 0.0 * grid.delta_inv; abcd_right[2] = 0.5 * grid.delta_inv; abcd_right[3] = bc.rVal; } if (bc.rCode == NATURAL || bc.rCode == DERIV2) { abcd_right[0] = 1.0 *grid.delta_inv * grid.delta_inv; abcd_right[1] =-2.0 *grid.delta_inv * grid.delta_inv; abcd_right[2] = 1.0 *grid.delta_inv * grid.delta_inv; abcd_right[3] = bc.rVal; } float *bands = malloc ((M+2)*4*sizeof(float)); for (int i=0; i<4; i++) { bands[4*( 0 )+i] = abcd_left[i]; bands[4*(M+1)+i] = abcd_right[i]; } for (int i=0; i<M; i++) { for (int j=0; j<3; j++) bands[4*(i+1)+j] = basis[j]; bands[4*(i+1)+3] = data[i*dstride]; } // Now, solve for coefficients solve_deriv_interp_1d_s (bands, coefs, M, cstride); free (bands); } } #endif //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //// Single-Precision, Real Creation Routines //// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. // On exit, coefs with contain interpolating B-spline coefs UBspline_1d_s* create_UBspline_1d_s (Ugrid x_grid, BCtype_s xBC, float *data) { // Create new spline UBspline_1d_s* restrict spline = malloc (sizeof(UBspline_1d_s)); spline->spcode = U1D; spline->tcode = SINGLE_REAL; spline->xBC = xBC; spline->x_grid = x_grid; // Setup internal variables int M = x_grid.num; int N; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num); N = M+3; } else { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1); N = M+2; } x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(float)*N); #else posix_memalign ((void**)&spline->coefs, 16, (sizeof(float)*N)); #endif find_coefs_1d_s (spline->x_grid, xBC, data, 1, spline->coefs, 1); init_sse_data(); return spline; } void recompute_UBspline_1d_s (UBspline_1d_s* spline, float *data) { find_coefs_1d_s (spline->x_grid, spline->xBC, data, 1, spline->coefs, 1); } UBspline_2d_s* create_UBspline_2d_s (Ugrid x_grid, Ugrid y_grid, BCtype_s xBC, BCtype_s yBC, float *data) { // Create new spline UBspline_2d_s* restrict spline = malloc (sizeof(UBspline_2d_s)); spline->spcode = U2D; spline->tcode = SINGLE_REAL; spline->xBC = xBC; spline->yBC = yBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Nx, Ny; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; spline->x_stride = Ny; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(float)*Nx*Ny); #else posix_memalign ((void**)&spline->coefs, 16, sizeof(float)*Nx*Ny); #endif // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = iy; intptr_t coffset = 
iy; find_coefs_1d_s (spline->x_grid, spline->xBC, data+doffset, My, spline->coefs+coffset, Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = ix*Ny; intptr_t coffset = ix*Ny; find_coefs_1d_s (spline->y_grid, spline->yBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } init_sse_data(); return spline; } void recompute_UBspline_2d_s (UBspline_2d_s* spline, float *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Nx, Ny; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = iy; intptr_t coffset = iy; find_coefs_1d_s (spline->x_grid, spline->xBC, data+doffset, My, spline->coefs+coffset, Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = ix*Ny; intptr_t coffset = ix*Ny; find_coefs_1d_s (spline->y_grid, spline->yBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } UBspline_3d_s* create_UBspline_3d_s (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid, BCtype_s xBC, BCtype_s yBC, BCtype_s zBC, float *data) { // Create new spline UBspline_3d_s* spline = malloc (sizeof(UBspline_3d_s)); spline->spcode = U3D; spline->tcode = SINGLE_REAL; spline->xBC = xBC; spline->yBC = yBC; spline->zBC = zBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num; int Nx, Ny, Nz; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3); z_grid.delta_inv = 1.0/z_grid.delta; spline->z_grid = z_grid; spline->x_stride = Ny*Nz; spline->y_stride = Nz; spline->coefs_size=(size_t)Nx*(size_t)Ny*(size_t)Nz; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(float)*spline->coefs_size); #else posix_memalign ((void**)&spline->coefs, 16, (sizeof(float)*spline->coefs_size)); #endif // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = iy*Mz+iz; intptr_t coffset = iy*Nz+iz; find_coefs_1d_s (spline->x_grid, xBC, data+doffset, My*Mz, spline->coefs+coffset, Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = ix*Ny*Nz + iz; intptr_t coffset = ix*Ny*Nz + iz; find_coefs_1d_s (spline->y_grid, yBC, spline->coefs+doffset, Nz, spline->coefs+coffset, Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = (ix*Ny+iy)*Nz; intptr_t coffset = (ix*Ny+iy)*Nz; find_coefs_1d_s (spline->z_grid, zBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } init_sse_data(); return spline; } void recompute_UBspline_3d_s (UBspline_3d_s* spline, float *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Mz = spline->z_grid.num; int Nx, Ny, Nz; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = 
My+3; else Ny = My+2; if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2;
  // First, solve in the X-direction
  for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = iy*Mz+iz; intptr_t coffset = iy*Nz+iz; find_coefs_1d_s (spline->x_grid, spline->xBC, data+doffset, My*Mz, spline->coefs+coffset, Ny*Nz); }
  // Now, solve in the Y-direction
  for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = ix*Ny*Nz + iz; intptr_t coffset = ix*Ny*Nz + iz; find_coefs_1d_s (spline->y_grid, spline->yBC, spline->coefs+doffset, Nz, spline->coefs+coffset, Nz); }
  // Now, solve in the Z-direction
  for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = (ix*Ny+iy)*Nz; intptr_t coffset = (ix*Ny+iy)*Nz; find_coefs_1d_s (spline->z_grid, spline->zBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } }

////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
////   Single-Precision, Complex Creation Routines       ////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////

// On input, bands should be filled with:
// row 0   : abcdInitial from boundary conditions
// rows 1:M: basis functions in first 3 cols, data in last
// row M+1 : abcdFinal from boundary conditions
// cstride gives the stride between values in coefs.
// On exit, coefs will contain interpolating B-spline coefs
UBspline_1d_c* create_UBspline_1d_c (Ugrid x_grid, BCtype_c xBC, complex_float *data) {
  // Create new spline
  UBspline_1d_c* restrict spline = malloc (sizeof(UBspline_1d_c)); spline->spcode = U1D; spline->tcode = SINGLE_COMPLEX; spline->xBC = xBC;
  // Setup internal variables
  int M = x_grid.num; int N; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num); N = M+3; } else { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1); N = M+2; } x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid;
#ifndef HAVE_SSE2
  spline->coefs = malloc (2*sizeof(float)*N);
#else
  posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(float)*N);
#endif
  BCtype_s xBC_r, xBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i;
  // Real part
  find_coefs_1d_s (spline->x_grid, xBC_r, (float*)data, 2, (float*)spline->coefs, 2);
  // Imaginary part
  find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+1, 2, ((float*)spline->coefs+1), 2);
  init_sse_data(); return spline; }

void recompute_UBspline_1d_c (UBspline_1d_c* spline, complex_float *data) {
  BCtype_s xBC_r, xBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i;
  // Real part
  find_coefs_1d_s (spline->x_grid, xBC_r, (float*)data, 2, (float*)spline->coefs, 2);
  // Imaginary part
  find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+1, 2, ((float*)spline->coefs+1), 2); }

UBspline_2d_c* create_UBspline_2d_c (Ugrid x_grid, Ugrid y_grid, BCtype_c xBC, BCtype_c yBC, complex_float *data) {
  // Create new spline
  UBspline_2d_c* restrict spline = malloc (sizeof(UBspline_2d_c)); spline->spcode = U2D; spline->tcode = SINGLE_COMPLEX; spline->xBC = xBC; spline->yBC = yBC;
// Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Nx, Ny; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; spline->x_stride = Ny; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(float)*Nx*Ny); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(float)*Nx*Ny); #endif BCtype_s xBC_r, xBC_i, yBC_r, yBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode; yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r; yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode; yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = 2*iy; intptr_t coffset = 2*iy; // Real part find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My, (float*)spline->coefs+coffset, 2*Ny); // Imag part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My, ((float*)spline->coefs)+coffset+1, 2*Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = 2*ix*Ny; intptr_t coffset = 2*ix*Ny; // Real part find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2, ((float*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2, ((float*)spline->coefs)+coffset+1, 2); } init_sse_data(); return spline; } void recompute_UBspline_2d_c (UBspline_2d_c* spline, complex_float *data) { // Setup internal variables int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Nx, Ny; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; BCtype_s xBC_r, xBC_i, yBC_r, yBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode; yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r; yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode; yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = 2*iy; intptr_t coffset = 2*iy; // Real part find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My, (float*)spline->coefs+coffset, 2*Ny); // Imag part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My, ((float*)spline->coefs)+coffset+1, 2*Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = 2*ix*Ny; intptr_t coffset = 2*ix*Ny; // Real part find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2, ((float*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2, ((float*)spline->coefs)+coffset+1, 2); } } 
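// Usage sketch (illustrative only, not part of the library): each complex
// creation routine above splits its BCtype_c boundary condition into a real
// and an imaginary BCtype_s and fits the two parts independently with
// find_coefs_1d_s.  Judging from the strides passed to find_coefs_1d_s, the
// 2-d input data is expected with the y index running fastest, i.e.
// data[ix*My + iy].  A caller might use these routines roughly as follows
// (variable names are placeholders):
//
//   Ugrid xg = { .start = 0.0, .end = 1.0, .num = 32 };
//   Ugrid yg = { .start = 0.0, .end = 2.0, .num = 48 };
//   BCtype_c xbc, ybc;
//   xbc.lCode = xbc.rCode = PERIODIC;
//   ybc.lCode = ybc.rCode = PERIODIC;
//   complex_float samples[32*48];                  // fill with data, y fastest
//   UBspline_2d_c *s = create_UBspline_2d_c (xg, yg, xbc, ybc, samples);
//   ...
//   recompute_UBspline_2d_c (s, samples);          // refit after samples change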
UBspline_3d_c* create_UBspline_3d_c (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid, BCtype_c xBC, BCtype_c yBC, BCtype_c zBC, complex_float *data) { // Create new spline UBspline_3d_c* restrict spline = malloc (sizeof(UBspline_3d_c)); spline->spcode = U3D; spline->tcode = SINGLE_COMPLEX; spline->xBC = xBC; spline->yBC = yBC; spline->zBC = zBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num; int Nx, Ny, Nz; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3); z_grid.delta_inv = 1.0/z_grid.delta; spline->z_grid = z_grid; spline->x_stride = Ny*Nz; spline->y_stride = Nz; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(float)*Nx*Ny*Nz); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(float)*Nx*Ny*Nz); #endif BCtype_s xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode; yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r; yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode; yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i; zBC_r.lCode = zBC.lCode; zBC_r.rCode = zBC.rCode; zBC_r.lVal = zBC.lVal_r; zBC_r.rVal = zBC.rVal_r; zBC_i.lCode = zBC.lCode; zBC_i.rCode = zBC.rCode; zBC_i.lVal = zBC.lVal_i; zBC_i.rVal = zBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = 2*(iy*Mz+iz); intptr_t coffset = 2*(iy*Nz+iz); // Real part find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My*Mz, ((float*)spline->coefs)+coffset, 2*Ny*Nz); // Imag part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My*Mz, ((float*)spline->coefs)+coffset+1, 2*Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = 2*(ix*Ny*Nz + iz); intptr_t coffset = 2*(ix*Ny*Nz + iz); // Real part find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2*Nz, ((float*)spline->coefs)+coffset, 2*Nz); // Imag part find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2*Nz, ((float*)spline->coefs)+coffset+1, 2*Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = 2*((ix*Ny+iy)*Nz); intptr_t coffset = 2*((ix*Ny+iy)*Nz); // Real part find_coefs_1d_s (spline->z_grid, zBC_r, ((float*)spline->coefs)+doffset, 2, ((float*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_s (spline->z_grid, zBC_i, ((float*)spline->coefs)+doffset+1, 2, ((float*)spline->coefs)+coffset+1, 2); } init_sse_data(); return spline; } void recompute_UBspline_3d_c (UBspline_3d_c* spline, complex_float *data) { // Setup internal variables int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Mz = spline->z_grid.num; int Nx, Ny, Nz; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if 
(spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; BCtype_s xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode; yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r; yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode; yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i; zBC_r.lCode = spline->zBC.lCode; zBC_r.rCode = spline->zBC.rCode; zBC_r.lVal = spline->zBC.lVal_r; zBC_r.rVal = spline->zBC.rVal_r; zBC_i.lCode = spline->zBC.lCode; zBC_i.rCode = spline->zBC.rCode; zBC_i.lVal = spline->zBC.lVal_i; zBC_i.rVal = spline->zBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = 2*(iy*Mz+iz); intptr_t coffset = 2*(iy*Nz+iz); // Real part find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My*Mz, ((float*)spline->coefs)+coffset, 2*Ny*Nz); // Imag part find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My*Mz, ((float*)spline->coefs)+coffset+1, 2*Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = 2*(ix*Ny*Nz + iz); intptr_t coffset = 2*(ix*Ny*Nz + iz); // Real part find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2*Nz, ((float*)spline->coefs)+coffset, 2*Nz); // Imag part find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2*Nz, ((float*)spline->coefs)+coffset+1, 2*Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = 2*((ix*Ny+iy)*Nz); intptr_t coffset = 2*((ix*Ny+iy)*Nz); // Real part find_coefs_1d_s (spline->z_grid, zBC_r, ((float*)spline->coefs)+doffset, 2, ((float*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_s (spline->z_grid, zBC_i, ((float*)spline->coefs)+doffset+1, 2, ((float*)spline->coefs)+coffset+1, 2); } } //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// //// Double-Precision, Real Creation Routines //// //////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////// // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. 
// On exit, coefs with contain interpolating B-spline coefs void solve_deriv_interp_1d_d (double bands[], double coefs[], int M, int cstride) { // Solve interpolating equations // First and last rows are different bands[4*(0)+1] /= bands[4*(0)+0]; bands[4*(0)+2] /= bands[4*(0)+0]; bands[4*(0)+3] /= bands[4*(0)+0]; bands[4*(0)+0] = 1.0; bands[4*(1)+1] -= bands[4*(1)+0]*bands[4*(0)+1]; bands[4*(1)+2] -= bands[4*(1)+0]*bands[4*(0)+2]; bands[4*(1)+3] -= bands[4*(1)+0]*bands[4*(0)+3]; bands[4*(0)+0] = 0.0; bands[4*(1)+2] /= bands[4*(1)+1]; bands[4*(1)+3] /= bands[4*(1)+1]; bands[4*(1)+1] = 1.0; // Now do rows 2 through M+1 for (int row=2; row < (M+1); row++) { bands[4*(row)+1] -= bands[4*(row)+0]*bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0]*bands[4*(row-1)+3]; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; bands[4*(row)+0] = 0.0; bands[4*(row)+1] = 1.0; } // Do last row bands[4*(M+1)+1] -= bands[4*(M+1)+0]*bands[4*(M-1)+2]; bands[4*(M+1)+3] -= bands[4*(M+1)+0]*bands[4*(M-1)+3]; bands[4*(M+1)+2] -= bands[4*(M+1)+1]*bands[4*(M)+2]; bands[4*(M+1)+3] -= bands[4*(M+1)+1]*bands[4*(M)+3]; bands[4*(M+1)+3] /= bands[4*(M+1)+2]; bands[4*(M+1)+2] = 1.0; coefs[(M+1)*cstride] = bands[4*(M+1)+3]; // Now back substitute up for (int row=M; row>0; row--) coefs[row*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[cstride*(row+1)]; // Finish with first row coefs[0] = bands[4*(0)+3] - bands[4*(0)+1]*coefs[1*cstride] - bands[4*(0)+2]*coefs[2*cstride]; } // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. // On exit, coefs with contain interpolating B-spline coefs void solve_periodic_interp_1d_d (double bands[], double coefs[], double lastCol[], int M, size_t cstride) { // Now solve: // First and last rows are different bands[4*(0)+2] /= bands[4*(0)+1]; bands[4*(0)+0] /= bands[4*(0)+1]; bands[4*(0)+3] /= bands[4*(0)+1]; bands[4*(0)+1] = 1.0; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0]; bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2]; lastCol[0] = bands[4*(0)+0]; for (int row=1; row < (M-1); row++) { bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3]; lastCol[row] = -bands[4*(row)+0] * lastCol[row-1]; bands[4*(row)+0] = 0.0; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; lastCol[row] /= bands[4*(row)+1]; bands[4*(row)+1] = 1.0; if (row < (M-2)) { bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3]; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2]; } } // Now do last row // The [2] element and [0] element are now on top of each other bands[4*(M-1)+0] += bands[4*(M-1)+2]; bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]); bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3]; bands[4*(M-1)+3] /= bands[4*(M-1)+1]; coefs[M*cstride] = bands[4*(M-1)+3]; for (int row=M-2; row>=0; row--) coefs[(row+1)*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride]; coefs[0*cstride] = coefs[M*cstride]; coefs[(M+1)*cstride] = coefs[1*cstride]; coefs[(M+2)*cstride] = coefs[2*cstride]; } // On input, bands should be filled with: // row 0 : abcdInitial from boundary conditions // rows 1:M: basis functions in first 3 
cols, data in last // row M+1 : abcdFinal from boundary conditions // cstride gives the stride between values in coefs. // On exit, coefs with contain interpolating B-spline coefs void solve_antiperiodic_interp_1d_d (double bands[], double coefs[], double lastCol[], int M, size_t cstride) { bands[4*0+0] *= -1.0; bands[4*(M-1)+2] *= -1.0; // Now solve: // First and last rows are different bands[4*(0)+2] /= bands[4*(0)+1]; bands[4*(0)+0] /= bands[4*(0)+1]; bands[4*(0)+3] /= bands[4*(0)+1]; bands[4*(0)+1] = 1.0; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0]; bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2]; lastCol[0] = bands[4*(0)+0]; for (int row=1; row < (M-1); row++) { bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2]; bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3]; lastCol[row] = -bands[4*(row)+0] * lastCol[row-1]; bands[4*(row)+0] = 0.0; bands[4*(row)+2] /= bands[4*(row)+1]; bands[4*(row)+3] /= bands[4*(row)+1]; lastCol[row] /= bands[4*(row)+1]; bands[4*(row)+1] = 1.0; if (row < (M-2)) { bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3]; bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row]; bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2]; } } // Now do last row // The [2] element and [0] element are now on top of each other bands[4*(M-1)+0] += bands[4*(M-1)+2]; bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]); bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3]; bands[4*(M-1)+3] /= bands[4*(M-1)+1]; coefs[M*cstride] = bands[4*(M-1)+3]; for (int row=M-2; row>=0; row--) coefs[(row+1)*cstride] = bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride]; coefs[0*cstride] = -coefs[M*cstride]; coefs[(M+1)*cstride] = -coefs[1*cstride]; coefs[(M+2)*cstride] = -coefs[2*cstride]; } void find_coefs_1d_d (Ugrid grid, BCtype_d bc, double *data, intptr_t dstride, double *coefs, intptr_t cstride) { int M = grid.num; double basis[4] = {1.0/6.0, 2.0/3.0, 1.0/6.0, 0.0}; if (bc.lCode == PERIODIC || bc.lCode == ANTIPERIODIC) { double *bands = (double*)malloc (4*M*sizeof(double)); double* lastCol = (double*) malloc(M*sizeof(double)); for (int i=0; i<M; i++) { bands[4*i+0] = basis[0]; bands[4*i+1] = basis[1]; bands[4*i+2] = basis[2]; bands[4*i+3] = data[i*dstride]; } if (bc.lCode == ANTIPERIODIC) solve_antiperiodic_interp_1d_d (bands, coefs, lastCol, M, cstride); else solve_periodic_interp_1d_d (bands, coefs, lastCol, M, cstride); free (bands); free (lastCol); } else { // Setup boundary conditions double abcd_left[4], abcd_right[4]; // Left boundary if (bc.lCode == FLAT || bc.lCode == NATURAL) bc.lVal = 0.0; if (bc.lCode == FLAT || bc.lCode == DERIV1) { abcd_left[0] = -0.5 * grid.delta_inv; abcd_left[1] = 0.0 * grid.delta_inv; abcd_left[2] = 0.5 * grid.delta_inv; abcd_left[3] = bc.lVal; } if (bc.lCode == NATURAL || bc.lCode == DERIV2) { abcd_left[0] = 1.0 * grid.delta_inv * grid.delta_inv; abcd_left[1] =-2.0 * grid.delta_inv * grid.delta_inv; abcd_left[2] = 1.0 * grid.delta_inv * grid.delta_inv; abcd_left[3] = bc.lVal; } // Right boundary if (bc.rCode == FLAT || bc.rCode == NATURAL) bc.rVal = 0.0; if (bc.rCode == FLAT || bc.rCode == DERIV1) { abcd_right[0] = -0.5 * grid.delta_inv; abcd_right[1] = 0.0 * grid.delta_inv; abcd_right[2] = 0.5 * grid.delta_inv; abcd_right[3] = bc.rVal; } if (bc.rCode == NATURAL || bc.rCode == DERIV2) { abcd_right[0] = 1.0 *grid.delta_inv * grid.delta_inv; abcd_right[1] =-2.0 *grid.delta_inv * grid.delta_inv; abcd_right[2] = 1.0 
*grid.delta_inv * grid.delta_inv; abcd_right[3] = bc.rVal; } double *bands = malloc ((M+2)*4*sizeof(double)); for (int i=0; i<4; i++) { bands[4*( 0 )+i] = abcd_left[i]; bands[4*(M+1)+i] = abcd_right[i]; } for (int i=0; i<M; i++) { for (int j=0; j<3; j++) bands[4*(i+1)+j] = basis[j]; bands[4*(i+1)+3] = data[i*dstride]; } // Now, solve for coefficients solve_deriv_interp_1d_d (bands, coefs, M, cstride); free (bands); } } UBspline_1d_d* create_UBspline_1d_d (Ugrid x_grid, BCtype_d xBC, double *data) { // Create new spline UBspline_1d_d* restrict spline = malloc (sizeof(UBspline_1d_d)); spline->spcode = U1D; spline->tcode = DOUBLE_REAL; spline->xBC = xBC; // Setup internal variables int M = x_grid.num; int N; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num); N = M+3; } else { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1); N = M+2; } x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(double)*N); #else posix_memalign ((void**)&spline->coefs, 16, sizeof(double)*N); #endif if(data != NULL) // only data is provided find_coefs_1d_d (spline->x_grid, xBC, data, 1, spline->coefs, 1); init_sse_data(); return spline; } void recompute_UBspline_1d_d (UBspline_1d_d* spline, double *data) { find_coefs_1d_d (spline->x_grid, spline->xBC, data, 1, spline->coefs, 1); } UBspline_2d_d* create_UBspline_2d_d (Ugrid x_grid, Ugrid y_grid, BCtype_d xBC, BCtype_d yBC, double *data) { // Create new spline UBspline_2d_d* restrict spline = malloc (sizeof(UBspline_2d_d)); spline->spcode = U2D; spline->tcode = DOUBLE_REAL; spline->xBC = xBC; spline->yBC = yBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Nx, Ny; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; spline->x_stride = Ny; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(double)*Nx*Ny); #else posix_memalign ((void**)&spline->coefs, 16, (sizeof(double)*Nx*Ny)); #endif // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = iy; intptr_t coffset = iy; find_coefs_1d_d (spline->x_grid, xBC, data+doffset, My, spline->coefs+coffset, Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = ix*Ny; intptr_t coffset = ix*Ny; find_coefs_1d_d (spline->y_grid, yBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } init_sse_data(); return spline; } void recompute_UBspline_2d_d (UBspline_2d_d* spline, double *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Nx, Ny; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = iy; intptr_t coffset = iy; find_coefs_1d_d (spline->x_grid, spline->xBC, data+doffset, My, spline->coefs+coffset, Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = ix*Ny; intptr_t coffset = ix*Ny; find_coefs_1d_d (spline->y_grid, spline->yBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); 
} } UBspline_3d_d* create_UBspline_3d_d (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid, BCtype_d xBC, BCtype_d yBC, BCtype_d zBC, double *data) { // Create new spline UBspline_3d_d* restrict spline = malloc (sizeof(UBspline_3d_d)); spline->spcode = U3D; spline->tcode = DOUBLE_REAL; spline->xBC = xBC; spline->yBC = yBC; spline->zBC = zBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num; int Nx, Ny, Nz; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3); z_grid.delta_inv = 1.0/z_grid.delta; spline->z_grid = z_grid; spline->x_stride = Ny*Nz; spline->y_stride = Nz; spline->coefs_size=(size_t)Nx*(size_t)Ny*(size_t)Nz; #ifndef HAVE_SSE2 spline->coefs = malloc (sizeof(double)*spline->coefs_size); #else posix_memalign ((void**)&spline->coefs, 16, (sizeof(double)*spline->coefs_size)); #endif if(data != NULL) // only data is provided { // First, solve in the X-direction #pragma omp parallel for for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = iy*Mz+iz; intptr_t coffset = iy*Nz+iz; find_coefs_1d_d (spline->x_grid, xBC, data+doffset, My*Mz, spline->coefs+coffset, Ny*Nz); } // Now, solve in the Y-direction #pragma omp parallel for for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = ix*Ny*Nz + iz; intptr_t coffset = ix*Ny*Nz + iz; find_coefs_1d_d (spline->y_grid, yBC, spline->coefs+doffset, Nz, spline->coefs+coffset, Nz); } // Now, solve in the Z-direction #pragma omp parallel for for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = (ix*Ny+iy)*Nz; intptr_t coffset = (ix*Ny+iy)*Nz; find_coefs_1d_d (spline->z_grid, zBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } init_sse_data(); return spline; } void recompute_UBspline_3d_d (UBspline_3d_d* spline, double *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Mz = spline->z_grid.num; int Nx, Ny, Nz; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; // First, solve in the X-direction #pragma omp parallel for for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = iy*Mz+iz; intptr_t coffset = iy*Nz+iz; find_coefs_1d_d (spline->x_grid, spline->xBC, data+doffset, My*Mz, spline->coefs+coffset, Ny*Nz); } // Now, solve in the Y-direction #pragma omp parallel for for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = ix*Ny*Nz + iz; intptr_t coffset = ix*Ny*Nz + iz; find_coefs_1d_d (spline->y_grid, spline->yBC, spline->coefs+doffset, Nz, spline->coefs+coffset, Nz); } // Now, solve in the Z-direction #pragma omp parallel for for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = (ix*Ny+iy)*Nz; intptr_t coffset = (ix*Ny+iy)*Nz; find_coefs_1d_d (spline->z_grid, spline->zBC, spline->coefs+doffset, 1, spline->coefs+coffset, 1); } } 
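// Usage sketch (illustrative only, not part of the library): the 3-d
// double-precision creator accepts data == NULL, in which case it only
// allocates coefficient storage and the coefficients can be fitted later
// with recompute_UBspline_3d_d; the three directional solves above are
// threaded with OpenMP.  Input data is expected with the z index fastest,
// i.e. data[(ix*My + iy)*Mz + iz], matching the strides passed to
// find_coefs_1d_d.  Placeholder example:
//
//   Ugrid g = { .start = 0.0, .end = 1.0, .num = 16 };
//   BCtype_d bc;
//   bc.lCode = NATURAL; bc.rCode = NATURAL; bc.lVal = 0.0; bc.rVal = 0.0;
//   UBspline_3d_d *s = create_UBspline_3d_d (g, g, g, bc, bc, bc, NULL);
//   // ... fill a 16*16*16 array 'samples' of doubles, then:
//   // recompute_UBspline_3d_d (s, samples);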
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
////   Double-Precision, Complex Creation Routines       ////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////

// On input, bands should be filled with:
// row 0   : abcdInitial from boundary conditions
// rows 1:M: basis functions in first 3 cols, data in last
// row M+1 : abcdFinal from boundary conditions
// cstride gives the stride between values in coefs.
// On exit, coefs will contain interpolating B-spline coefs
UBspline_1d_z* create_UBspline_1d_z (Ugrid x_grid, BCtype_z xBC, complex_double *data) {
  // Create new spline
  UBspline_1d_z* restrict spline = malloc (sizeof(UBspline_1d_z)); spline->spcode = U1D; spline->tcode = DOUBLE_COMPLEX; spline->xBC = xBC;
  // Setup internal variables
  int M = x_grid.num; int N; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num); N = M+3; } else { x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1); N = M+2; } x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid;
#ifndef HAVE_SSE2
  spline->coefs = malloc (2*sizeof(double)*N);
#else
  posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(double)*N);
#endif
  BCtype_d xBC_r, xBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i;
  // Real part
  find_coefs_1d_d (spline->x_grid, xBC_r, (double*)data, 2, (double*)spline->coefs, 2);
  // Imaginary part
  find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+1, 2, ((double*)spline->coefs)+1, 2);
  init_sse_data(); return spline; }

void recompute_UBspline_1d_z (UBspline_1d_z* spline, complex_double *data) {
  int M = spline->x_grid.num; int N; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) N = M+3; else N = M+2;
  BCtype_d xBC_r, xBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i;
  // Real part
  find_coefs_1d_d (spline->x_grid, xBC_r, (double*)data, 2, (double*)spline->coefs, 2);
  // Imaginary part
  find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+1, 2, ((double*)spline->coefs)+1, 2); }

UBspline_2d_z* create_UBspline_2d_z (Ugrid x_grid, Ugrid y_grid, BCtype_z xBC, BCtype_z yBC, complex_double *data) {
  // Create new spline
  UBspline_2d_z* restrict spline = malloc (sizeof(UBspline_2d_z)); spline->spcode = U2D; spline->tcode = DOUBLE_COMPLEX; spline->xBC = xBC; spline->yBC = yBC;
  // Setup internal variables
  int Mx = x_grid.num; int My = y_grid.num; int Nx, Ny; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; spline->x_stride = Ny;
#ifndef HAVE_SSE2
  spline->coefs = malloc (2*sizeof(double)*Nx*Ny);
#else
  posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(double)*Nx*Ny);
#endif
  BCtype_d xBC_r, xBC_i, yBC_r, yBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode;
xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode; yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r; yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode; yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = 2*iy; intptr_t coffset = 2*iy; // Real part find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data+doffset), 2*My, (double*)spline->coefs+coffset, 2*Ny); // Imag part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My, ((double*)spline->coefs)+coffset+1, 2*Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = 2*ix*Ny; intptr_t coffset = 2*ix*Ny; // Real part find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2, (double*)spline->coefs+coffset, 2); // Imag part find_coefs_1d_d (spline->y_grid, yBC_i, (double*)spline->coefs+doffset+1, 2, ((double*)spline->coefs)+coffset+1, 2); } init_sse_data(); return spline; } void recompute_UBspline_2d_z (UBspline_2d_z* spline, complex_double *data) { int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Nx, Ny; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; BCtype_d xBC_r, xBC_i, yBC_r, yBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode; yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r; yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode; yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) { intptr_t doffset = 2*iy; intptr_t coffset = 2*iy; // Real part find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data+doffset), 2*My, (double*)spline->coefs+coffset, 2*Ny); // Imag part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My, ((double*)spline->coefs)+coffset+1, 2*Ny); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) { intptr_t doffset = 2*ix*Ny; intptr_t coffset = 2*ix*Ny; // Real part find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2, (double*)spline->coefs+coffset, 2); // Imag part find_coefs_1d_d (spline->y_grid, yBC_i, (double*)spline->coefs+doffset+1, 2, ((double*)spline->coefs)+coffset+1, 2); } } UBspline_3d_z* create_UBspline_3d_z (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid, BCtype_z xBC, BCtype_z yBC, BCtype_z zBC, complex_double *data) { // Create new spline UBspline_3d_z* restrict spline = malloc (sizeof(UBspline_3d_z)); spline->spcode = U3D; spline->tcode = DOUBLE_COMPLEX; spline->xBC = xBC; spline->yBC = yBC; spline->zBC = zBC; // Setup internal variables int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num; int Nx, Ny, Nz; if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3); x_grid.delta_inv = 1.0/x_grid.delta; spline->x_grid = x_grid; if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; y_grid.delta = (y_grid.end - 
y_grid.start)/(double)(Ny-3); y_grid.delta_inv = 1.0/y_grid.delta; spline->y_grid = y_grid; if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3); z_grid.delta_inv = 1.0/z_grid.delta; spline->z_grid = z_grid; spline->x_stride = Ny*Nz; spline->y_stride = Nz; #ifndef HAVE_SSE2 spline->coefs = malloc (2*sizeof(double)*Nx*Ny*Nz); #else posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(double)*Nx*Ny*Nz); #endif BCtype_d xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i; xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode; xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r; xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode; xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i; yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode; yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r; yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode; yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i; zBC_r.lCode = zBC.lCode; zBC_r.rCode = zBC.rCode; zBC_r.lVal = zBC.lVal_r; zBC_r.rVal = zBC.rVal_r; zBC_i.lCode = zBC.lCode; zBC_i.rCode = zBC.rCode; zBC_i.lVal = zBC.lVal_i; zBC_i.rVal = zBC.rVal_i; // First, solve in the X-direction for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = 2*(iy*Mz+iz); intptr_t coffset = 2*(iy*Nz+iz); // Real part find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data)+doffset, 2*My*Mz, ((double*)spline->coefs)+coffset, 2*Ny*Nz); // Imag part find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My*Mz, ((double*)spline->coefs)+coffset+1, 2*Ny*Nz); } // Now, solve in the Y-direction for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = 2*(ix*Ny*Nz + iz); intptr_t coffset = 2*(ix*Ny*Nz + iz); // Real part find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2*Nz, ((double*)spline->coefs)+coffset, 2*Nz); // Imag part find_coefs_1d_d (spline->y_grid, yBC_i, ((double*)spline->coefs)+doffset+1, 2*Nz, ((double*)spline->coefs)+coffset+1, 2*Nz); } // Now, solve in the Z-direction for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = 2*((ix*Ny+iy)*Nz); intptr_t coffset = 2*((ix*Ny+iy)*Nz); // Real part find_coefs_1d_d (spline->z_grid, zBC_r, ((double*)spline->coefs)+doffset, 2, ((double*)spline->coefs)+coffset, 2); // Imag part find_coefs_1d_d (spline->z_grid, zBC_i, ((double*)spline->coefs)+doffset+1, 2, ((double*)spline->coefs)+coffset+1, 2); } init_sse_data(); return spline; } void recompute_UBspline_3d_z (UBspline_3d_z* spline, complex_double *data) { // Setup internal variables int Mx = spline->x_grid.num; int My = spline->y_grid.num; int Mz = spline->z_grid.num; int Nx, Ny, Nz; if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC) Nx = Mx+3; else Nx = Mx+2; if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC) Ny = My+3; else Ny = My+2; if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC) Nz = Mz+3; else Nz = Mz+2; BCtype_d xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i; xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode; xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r; xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode; xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i; yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode; yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r; yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode; yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = 
spline->yBC.rVal_i; zBC_r.lCode = spline->zBC.lCode; zBC_r.rCode = spline->zBC.rCode; zBC_r.lVal = spline->zBC.lVal_r; zBC_r.rVal = spline->zBC.rVal_r; zBC_i.lCode = spline->zBC.lCode; zBC_i.rCode = spline->zBC.rCode; zBC_i.lVal = spline->zBC.lVal_i; zBC_i.rVal = spline->zBC.rVal_i;
  // First, solve in the X-direction
  for (int iy=0; iy<My; iy++) for (int iz=0; iz<Mz; iz++) { intptr_t doffset = 2*(iy*Mz+iz); intptr_t coffset = 2*(iy*Nz+iz);
    // Real part
    find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data)+doffset, 2*My*Mz, ((double*)spline->coefs)+coffset, 2*Ny*Nz);
    // Imag part
    find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My*Mz, ((double*)spline->coefs)+coffset+1, 2*Ny*Nz); }
  // Now, solve in the Y-direction
  for (int ix=0; ix<Nx; ix++) for (int iz=0; iz<Nz; iz++) { intptr_t doffset = 2*(ix*Ny*Nz + iz); intptr_t coffset = 2*(ix*Ny*Nz + iz);
    // Real part
    find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2*Nz, ((double*)spline->coefs)+coffset, 2*Nz);
    // Imag part
    find_coefs_1d_d (spline->y_grid, yBC_i, ((double*)spline->coefs)+doffset+1, 2*Nz, ((double*)spline->coefs)+coffset+1, 2*Nz); }
  // Now, solve in the Z-direction
  for (int ix=0; ix<Nx; ix++) for (int iy=0; iy<Ny; iy++) { intptr_t doffset = 2*((ix*Ny+iy)*Nz); intptr_t coffset = 2*((ix*Ny+iy)*Nz);
    // Real part
    find_coefs_1d_d (spline->z_grid, zBC_r, ((double*)spline->coefs)+doffset, 2, ((double*)spline->coefs)+coffset, 2);
    // Imag part
    find_coefs_1d_d (spline->z_grid, zBC_i, ((double*)spline->coefs)+doffset+1, 2, ((double*)spline->coefs)+coffset+1, 2); } }

void destroy_UBspline (Bspline *spline) { free (spline->coefs); free (spline); }

void destroy_NUBspline (Bspline *spline);
void destroy_multi_UBspline (Bspline *spline);

void destroy_Bspline (void *spline) { Bspline *sp = (Bspline *)spline;
  if (sp->sp_code <= U3D) destroy_UBspline (sp);
  else if (sp->sp_code <= NU3D) destroy_NUBspline (sp);
  else if (sp->sp_code <= MULTI_U3D) destroy_multi_UBspline (sp);
  else fprintf (stderr, "Error in destroy_Bspline: invalid spline code %d.\n", sp->sp_code); }
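// Teardown note (illustrative): destroy_Bspline dispatches on sp_code so any
// spline created by this library can be released through one entry point --
// uniform splines (codes up to U3D) via destroy_UBspline, nonuniform ones via
// destroy_NUBspline, and multi-splines via destroy_multi_UBspline.  For
// example, pairing a creator from above with the generic destructor
// (placeholder variables):
//
//   UBspline_1d_d *s = create_UBspline_1d_d (xg, xbc, samples);
//   // ... evaluate / recompute as needed ...
//   destroy_Bspline (s);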
par_multi_interp.c
/****************************************************************************** * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_parcsr_ls.h" /*-------------------------------------------------------------------------- * hypre_ParAMGBuildMultipass * This routine implements Stuben's direct interpolation with multiple passes. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildMultipassHost( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int P_max_elmts, HYPRE_Int weight_option, hypre_ParCSRMatrix **P_ptr ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MULTIPASS_INTERP] -= hypre_MPI_Wtime(); #endif MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_ParCSRCommPkg *tmp_comm_pkg; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = NULL; HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = NULL; //HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = NULL; /*HYPRE_BigInt *col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd); HYPRE_BigInt *col_map_offd = NULL;*/ HYPRE_Int num_cols_offd; hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; /*at first counter of nonzero cols for each row, finally will be pointer to start of row */ HYPRE_Int *P_diag_j; hypre_CSRMatrix *P_offd; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i; /*at first counter of nonzero cols for each row, finally will be pointer to start of row */ HYPRE_Int *P_offd_j = NULL; HYPRE_Int num_sends = 0; HYPRE_Int *int_buf_data = NULL; HYPRE_BigInt *big_buf_data = NULL; HYPRE_Int *send_map_start; HYPRE_Int *send_map_elmt; HYPRE_Int *send_procs; HYPRE_Int num_recvs = 0; HYPRE_Int *recv_vec_start; HYPRE_Int *recv_procs; HYPRE_Int *new_recv_vec_start = NULL; HYPRE_Int **Pext_send_map_start = NULL; HYPRE_Int **Pext_recv_vec_start = NULL; HYPRE_Int *Pext_start = NULL; HYPRE_Int *P_ncols = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; HYPRE_Int *P_marker; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *C_array; HYPRE_Int *C_array_offd = NULL; HYPRE_Int *pass_array = NULL; /* contains points ordered according to pass */ HYPRE_Int *pass_pointer = NULL; /* pass_pointer[j] contains pointer to first point of pass j contained in pass_array */ HYPRE_Int *P_diag_start; HYPRE_Int *P_offd_start = NULL; HYPRE_Int **P_diag_pass; HYPRE_Int **P_offd_pass = NULL; HYPRE_Int **Pext_pass = NULL; HYPRE_BigInt 
*big_temp_pass = NULL; HYPRE_BigInt **new_elmts = NULL; /* new neighbors generated in each pass */ HYPRE_Int *new_counter = NULL; /* contains no. of new neighbors for each pass */ HYPRE_Int *loc = NULL; /* contains locations for new neighbor connections in int_o_buffer to avoid searching */ HYPRE_Int *Pext_i = NULL; /*contains P_diag_i and P_offd_i info for nonzero cols of off proc neighbors */ HYPRE_BigInt *Pext_send_buffer = NULL; /* used to collect global nonzero col ids in P_diag for send_map_elmts */ HYPRE_Int *map_S_to_new = NULL; HYPRE_BigInt *new_col_map_offd = NULL; HYPRE_BigInt *col_map_offd_P = NULL; HYPRE_Int *permute = NULL; HYPRE_BigInt *big_permute = NULL; HYPRE_Int cnt; HYPRE_Int cnt_nz; HYPRE_Int total_nz; HYPRE_Int pass; HYPRE_Int num_passes; HYPRE_Int max_num_passes = 10; HYPRE_Int n_fine; HYPRE_Int n_coarse = 0; HYPRE_Int n_coarse_offd = 0; HYPRE_Int n_SF = 0; HYPRE_Int n_SF_offd = 0; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int *assigned = NULL; HYPRE_Int *assigned_offd = NULL; HYPRE_Real *Pext_send_data = NULL; HYPRE_Real *Pext_data = NULL; HYPRE_Real sum_C, sum_N; HYPRE_Real sum_C_pos, sum_C_neg; HYPRE_Real sum_N_pos, sum_N_neg; HYPRE_Real diagonal; HYPRE_Real alfa = 1.0; HYPRE_Real beta = 1.0; HYPRE_Int j_start; HYPRE_Int j_end; HYPRE_Int i, i1; HYPRE_Int j, j1; HYPRE_Int k, k1, k2, k3; HYPRE_BigInt big_k1; HYPRE_Int pass_array_size; HYPRE_BigInt global_pass_array_size; HYPRE_BigInt local_pass_array_size; HYPRE_Int my_id, num_procs; HYPRE_Int index, start; HYPRE_BigInt my_first_cpt; HYPRE_BigInt total_global_cpts; HYPRE_Int p_cnt; HYPRE_Int total_nz_offd; HYPRE_Int cnt_nz_offd; HYPRE_Int cnt_offd, cnt_new; HYPRE_Int no_break; HYPRE_Int not_found; HYPRE_Int Pext_send_size; HYPRE_Int Pext_recv_size; HYPRE_Int old_Pext_send_size; HYPRE_Int old_Pext_recv_size; HYPRE_Int P_offd_size = 0; HYPRE_Int local_index = -1; HYPRE_Int new_num_cols_offd = 0; HYPRE_Int num_cols_offd_P; /* Threading variables */ HYPRE_Int my_thread_num, num_threads, thread_start, thread_stop; HYPRE_Int pass_length; HYPRE_Int *tmp_marker, *tmp_marker_offd; HYPRE_Int *tmp_array, *tmp_array_offd; HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); HYPRE_Int * cnt_nz_per_thread; HYPRE_Int * cnt_nz_offd_per_thread; /* HYPRE_Real wall_time; wall_time = hypre_MPI_Wtime(); */ /* Initialize threading variables */ max_num_threads[0] = hypre_NumThreads(); cnt_nz_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); cnt_nz_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); for (i = 0; i < max_num_threads[0]; i++) { cnt_nz_offd_per_thread[i] = 0; cnt_nz_per_thread[i] = 0; } /*----------------------------------------------------------------------- * Access the CSR vectors for A and S. Also get size of fine grid. 
*-----------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); my_first_cpt = num_cpts_global[0]; /* total_global_cpts = 0; */ if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); if (!comm_pkg) { comm_pkg = hypre_ParCSRMatrixCommPkg(A); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } } //col_map_offd = col_map_offd_A; num_cols_offd = num_cols_offd_A; if (num_cols_offd_A) { A_offd_data = hypre_CSRMatrixData(A_offd); A_offd_j = hypre_CSRMatrixJ(A_offd); } if (num_cols_offd) { S_offd_j = hypre_CSRMatrixJ(S_offd); } n_fine = hypre_CSRMatrixNumRows(A_diag); /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } n_coarse = 0; n_SF = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) reduction(+:n_coarse,n_SF ) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) if (CF_marker[i] == 1) { n_coarse++; } else if (CF_marker[i] == -3) { n_SF++; } pass_array_size = n_fine - n_coarse - n_SF; if (pass_array_size) { pass_array = hypre_CTAlloc(HYPRE_Int, pass_array_size, HYPRE_MEMORY_HOST); } pass_pointer = hypre_CTAlloc(HYPRE_Int, max_num_passes + 1, HYPRE_MEMORY_HOST); if (n_fine) { assigned = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE); if (n_coarse) { C_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); } if (num_cols_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); if (num_functions > 1) { dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } } if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); send_map_start = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmt = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); recv_vec_start = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); if (send_map_start[num_sends]) { int_buf_data = hypre_CTAlloc(HYPRE_Int, send_map_start[num_sends], HYPRE_MEMORY_HOST); big_buf_data = hypre_CTAlloc(HYPRE_BigInt, send_map_start[num_sends], HYPRE_MEMORY_HOST); } } index = 0; for (i = 0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i + 1]; j++) { int_buf_data[index++] = CF_marker[send_map_elmt[j]]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } if (num_functions > 1) { index = 0; for (i = 0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i + 1]; j++) { int_buf_data[index++] = dof_func[send_map_elmt[j]]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } } n_coarse_offd = 0; n_SF_offd = 0; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) reduction(+:n_coarse_offd,n_SF_offd) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < 
num_cols_offd; i++) if (CF_marker_offd[i] == 1) { n_coarse_offd++; } else if (CF_marker_offd[i] == -3) { n_SF_offd++; }
   if (num_cols_offd) { assigned_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); map_S_to_new = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, n_coarse_offd, HYPRE_MEMORY_HOST); }
   /*-----------------------------------------------------------------------
    * First Pass: determine the maximal size of P, and elementsPerRow[i].
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    * Assigned points are points for which we know an interpolation
    * formula already, and which are thus available to interpolate from.
    * assigned[i]=0 for C points, and 1, 2, 3, ... for F points, depending
    * on which pass their interpolation formula is determined.
    *
    * pass_array contains the points ordered according to their pass, i.e.
    * | C-points | points of pass 1 | points of pass 2 | ....
    * C_points are points 0 through pass_pointer[1]-1,
    * points of pass k (0 < k < num_passes) are contained in points
    * pass_pointer[k] through pass_pointer[k+1]-1 of pass_array.
    *
    * pass_array is also used to avoid going through all points for each pass,
    * i.e. at the beginning it contains all points in descending order starting
    * with n_fine-1. Then starting from the last point, we evaluate whether
    * it is a C_point (pass 0). If it is, the point is brought to the front
    * and the length of the points to be searched is shortened. This is
    * done until the parameter cnt (which determines the first point of
    * pass_array to be searched) becomes n_fine. Then all points have been
    * assigned a pass number.
*-----------------------------------------------------------------------*/ cnt = 0; p_cnt = pass_array_size - 1; P_diag_i[0] = 0; P_offd_i[0] = 0; for (i = 0; i < n_fine; i++) { if (CF_marker[i] == 1) { fine_to_coarse[i] = cnt; /* this C point is assigned index coarse_counter on coarse grid, and in column of P */ C_array[cnt++] = i; assigned[i] = 0; P_diag_i[i + 1] = 1; /* one element in row i1 of P */ P_offd_i[i + 1] = 0; } else if (CF_marker[i] == -1) { pass_array[p_cnt--] = i; P_diag_i[i + 1] = 0; P_offd_i[i + 1] = 0; assigned[i] = -1; fine_to_coarse[i] = -1; } else { P_diag_i[i + 1] = 0; P_offd_i[i + 1] = 0; assigned[i] = -1; fine_to_coarse[i] = -1; } } index = 0; for (i = 0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i + 1]; j++) { big_buf_data[index] = (HYPRE_BigInt)fine_to_coarse[send_map_elmt[j]]; if (big_buf_data[index] > -1) { big_buf_data[index] += my_first_cpt; } index++; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg, big_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } new_recv_vec_start = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); if (n_coarse_offd) { C_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST); } cnt = 0; new_recv_vec_start[0] = 0; for (j = 0; j < num_recvs; j++) { for (i = recv_vec_start[j]; i < recv_vec_start[j + 1]; i++) { if (CF_marker_offd[i] == 1) { map_S_to_new[i] = cnt; C_array_offd[cnt] = i; new_col_map_offd[cnt++] = fine_to_coarse_offd[i]; assigned_offd[i] = 0; } else { assigned_offd[i] = -1; map_S_to_new[i] = -1; } } new_recv_vec_start[j + 1] = cnt; } cnt = 0; hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Mark all local neighbors of C points as 'assigned'. *-----------------------------------------------------------------------*/ pass_pointer[0] = 0; pass_pointer[1] = 0; total_nz = n_coarse; /* accumulates total number of nonzeros in P_diag */ total_nz_offd = 0; /* accumulates total number of nonzeros in P_offd */ cnt = 0; cnt_offd = 0; cnt_nz = 0; cnt_nz_offd = 0; for (i = pass_array_size - 1; i > cnt - 1; i--) { i1 = pass_array[i]; for (j = S_diag_i[i1]; j < S_diag_i[i1 + 1]; j++) { j1 = S_diag_j[j]; if (CF_marker[j1] == 1) { P_diag_i[i1 + 1]++; cnt_nz++; assigned[i1] = 1; } } for (j = S_offd_i[i1]; j < S_offd_i[i1 + 1]; j++) { j1 = S_offd_j[j]; if (CF_marker_offd[j1] == 1) { P_offd_i[i1 + 1]++; cnt_nz_offd++; assigned[i1] = 1; } } if (assigned[i1] == 1) { pass_array[i++] = pass_array[cnt]; pass_array[cnt++] = i1; } } pass_pointer[2] = cnt; /*----------------------------------------------------------------------- * All local neighbors are assigned, now need to exchange the boundary * info for assigned strong neighbors. *-----------------------------------------------------------------------*/ index = 0; for (i = 0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i + 1]; j++) { int_buf_data[index++] = assigned[send_map_elmt[j]]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, assigned_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } /*----------------------------------------------------------------------- * Now we need to determine strong neighbors of points of pass 1, etc. 
* we need to update assigned_offd after each pass *-----------------------------------------------------------------------*/ pass = 2; local_pass_array_size = (HYPRE_BigInt)(pass_array_size - cnt); hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); while (global_pass_array_size && pass < max_num_passes) { for (i = pass_array_size - 1; i > cnt - 1; i--) { i1 = pass_array[i]; no_break = 1; for (j = S_diag_i[i1]; j < S_diag_i[i1 + 1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass - 1) { pass_array[i++] = pass_array[cnt]; pass_array[cnt++] = i1; assigned[i1] = pass; no_break = 0; break; } } if (no_break) { for (j = S_offd_i[i1]; j < S_offd_i[i1 + 1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass - 1) { pass_array[i++] = pass_array[cnt]; pass_array[cnt++] = i1; assigned[i1] = pass; break; } } } } /*hypre_printf("pass %d remaining points %d \n", pass, local_pass_array_size);*/ pass++; pass_pointer[pass] = cnt; local_pass_array_size = (HYPRE_BigInt)(pass_array_size - cnt); hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); index = 0; for (i = 0; i < num_sends; i++) { start = send_map_start[i]; for (j = start; j < send_map_start[i + 1]; j++) { int_buf_data[index++] = assigned[send_map_elmt[j]]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, assigned_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } } hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST); num_passes = pass; P_diag_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); /* P_diag_pass[i] will contain all column numbers for points of pass i */ P_diag_pass[1] = hypre_CTAlloc(HYPRE_Int, cnt_nz, HYPRE_MEMORY_HOST); P_diag_start = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); /* P_diag_start[i] contains pointer to begin of column numbers in P_pass for point i, P_diag_i[i+1] contains number of columns for point i */ P_offd_start = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_procs > 1) { P_offd_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); if (cnt_nz_offd) { P_offd_pass[1] = hypre_CTAlloc(HYPRE_Int, cnt_nz_offd, HYPRE_MEMORY_HOST); } else { P_offd_pass[1] = NULL; } new_elmts = hypre_CTAlloc(HYPRE_BigInt*, num_passes, HYPRE_MEMORY_HOST); new_counter = hypre_CTAlloc(HYPRE_Int, num_passes + 1, HYPRE_MEMORY_HOST); new_counter[0] = 0; new_counter[1] = n_coarse_offd; new_num_cols_offd = n_coarse_offd; new_elmts[0] = new_col_map_offd; } /*----------------------------------------------------------------------- * Pass 1: now we consider points of pass 1, with strong C_neighbors, *-----------------------------------------------------------------------*/ cnt_nz = 0; cnt_nz_offd = 0; /* JBS: Possible candidate for threading */ for (i = pass_pointer[1]; i < pass_pointer[2]; i++) { i1 = pass_array[i]; P_diag_start[i1] = cnt_nz; P_offd_start[i1] = cnt_nz_offd; for (j = S_diag_i[i1]; j < S_diag_i[i1 + 1]; j++) { j1 = S_diag_j[j]; if (CF_marker[j1] == 1) { P_diag_pass[1][cnt_nz++] = fine_to_coarse[j1]; } } for (j = S_offd_i[i1]; j < S_offd_i[i1 + 1]; j++) { j1 = S_offd_j[j]; if (CF_marker_offd[j1] == 1) { P_offd_pass[1][cnt_nz_offd++] = map_S_to_new[j1]; } } } total_nz += cnt_nz; total_nz_offd += cnt_nz_offd; if (num_procs > 1) { tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); Pext_send_map_start = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); 
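   /*-----------------------------------------------------------------------
    * Editorial sketch (illustrative only, not part of hypre): how the
    * P_diag_pass / P_diag_start / P_diag_i bookkeeping set up above is
    * meant to be read.  For a point i1 assigned to pass p, its tentative
    * coarse interpolation columns are stored contiguously in
    * P_diag_pass[p]; P_diag_start[i1] gives the offset of the first one,
    * and P_diag_i[i1+1] (still a per-row count at this stage, not yet a
    * cumulative row pointer) gives how many there are.  The guarded loop
    * below only illustrates that indexing.
    *-----------------------------------------------------------------------*/
#if 0
   {
      HYPRE_Int p, jj, jj_start, jj_end, coarse_col;
      p        = assigned[i1];                 /* pass in which i1 was assigned */
      jj_start = P_diag_start[i1];             /* first stored column for i1    */
      jj_end   = jj_start + P_diag_i[i1 + 1];  /* one past the last column      */
      for (jj = jj_start; jj < jj_end; jj++)
      {
         coarse_col = P_diag_pass[p][jj];      /* local coarse column index     */
      }
   }
#endif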
Pext_recv_vec_start = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); Pext_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); Pext_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd + 1, HYPRE_MEMORY_HOST); if (num_cols_offd) { Pext_start = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } if (send_map_start[num_sends]) { P_ncols = hypre_CTAlloc(HYPRE_Int, send_map_start[num_sends], HYPRE_MEMORY_HOST); } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < num_cols_offd + 1; i++) { Pext_i[i] = 0; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < send_map_start[num_sends]; i++) { P_ncols[i] = 0; } } old_Pext_send_size = 0; old_Pext_recv_size = 0; for (pass = 2; pass < num_passes; pass++) { if (num_procs > 1) { Pext_send_map_start[pass] = hypre_CTAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST); Pext_recv_vec_start[pass] = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); Pext_send_size = 0; Pext_send_map_start[pass][0] = 0; for (i = 0; i < num_sends; i++) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(j,j1) reduction(+:Pext_send_size) HYPRE_SMP_SCHEDULE #endif for (j = send_map_start[i]; j < send_map_start[i + 1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass - 1) { P_ncols[j] = P_diag_i[j1 + 1] + P_offd_i[j1 + 1]; Pext_send_size += P_ncols[j]; } } Pext_send_map_start[pass][i + 1] = Pext_send_size; } comm_handle = hypre_ParCSRCommHandleCreate (11, comm_pkg, P_ncols, &Pext_i[1]); hypre_ParCSRCommHandleDestroy(comm_handle); if (Pext_send_size > old_Pext_send_size) { hypre_TFree(Pext_send_buffer, HYPRE_MEMORY_HOST); Pext_send_buffer = hypre_CTAlloc(HYPRE_BigInt, Pext_send_size, HYPRE_MEMORY_HOST); } old_Pext_send_size = Pext_send_size; } cnt_offd = 0; for (i = 0; i < num_sends; i++) { for (j = send_map_start[i]; j < send_map_start[i + 1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass - 1) { j_start = P_diag_start[j1]; j_end = j_start + P_diag_i[j1 + 1]; for (k = j_start; k < j_end; k++) { Pext_send_buffer[cnt_offd++] = my_first_cpt + (HYPRE_BigInt) P_diag_pass[pass - 1][k]; } j_start = P_offd_start[j1]; j_end = j_start + P_offd_i[j1 + 1]; for (k = j_start; k < j_end; k++) { k1 = P_offd_pass[pass - 1][k]; k3 = 0; while (k3 < pass - 1) { if (k1 < new_counter[k3 + 1]) { k2 = k1 - new_counter[k3]; Pext_send_buffer[cnt_offd++] = new_elmts[k3][k2]; break; } k3++; } } } } } if (num_procs > 1) { Pext_recv_size = 0; Pext_recv_vec_start[pass][0] = 0; cnt_offd = 0; for (i = 0; i < num_recvs; i++) { for (j = recv_vec_start[i]; j < recv_vec_start[i + 1]; j++) { if (assigned_offd[j] == pass - 1) { Pext_start[j] = cnt_offd; cnt_offd += Pext_i[j + 1]; } } Pext_recv_size = cnt_offd; Pext_recv_vec_start[pass][i + 1] = Pext_recv_size; } hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm; hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = Pext_send_map_start[pass]; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = Pext_recv_vec_start[pass]; if (Pext_recv_size) { Pext_pass[pass] = hypre_CTAlloc(HYPRE_Int, Pext_recv_size, HYPRE_MEMORY_HOST); new_elmts[pass - 1] = hypre_CTAlloc(HYPRE_BigInt, Pext_recv_size, HYPRE_MEMORY_HOST); } else { Pext_pass[pass] = NULL; new_elmts[pass - 1] = NULL; } if (Pext_recv_size > old_Pext_recv_size) 
{ hypre_TFree(loc, HYPRE_MEMORY_HOST); loc = hypre_CTAlloc(HYPRE_Int, Pext_recv_size, HYPRE_MEMORY_HOST); hypre_TFree(big_temp_pass, HYPRE_MEMORY_HOST); big_temp_pass = hypre_CTAlloc(HYPRE_BigInt, Pext_recv_size, HYPRE_MEMORY_HOST); } old_Pext_recv_size = Pext_recv_size; comm_handle = hypre_ParCSRCommHandleCreate (21, tmp_comm_pkg, Pext_send_buffer, big_temp_pass); hypre_ParCSRCommHandleDestroy(comm_handle); } cnt_new = 0; cnt_offd = 0; /* JBS: Possible candidate for threading */ for (i = 0; i < num_recvs; i++) { for (j = recv_vec_start[i]; j < recv_vec_start[i + 1]; j++) { if (assigned_offd[j] == pass - 1) { for (j1 = cnt_offd; j1 < cnt_offd + Pext_i[j + 1]; j1++) { big_k1 = big_temp_pass[j1]; k2 = (HYPRE_Int)(big_k1 - my_first_cpt); if (k2 > -1 && k2 < n_coarse) { Pext_pass[pass][j1] = -k2 - 1; } else { not_found = 1; k3 = 0; while (k3 < pass - 1 && not_found) { k2 = hypre_BigBinarySearch(new_elmts[k3], big_k1, (new_counter[k3 + 1] - new_counter[k3])); if (k2 > -1) { Pext_pass[pass][j1] = k2 + new_counter[k3]; not_found = 0; } else { k3++; } } if (not_found) { new_elmts[pass - 1][cnt_new] = big_k1; loc[cnt_new++] = j1; } } } cnt_offd += Pext_i[j + 1]; } } } if (cnt_new) { hypre_BigQsortbi(new_elmts[pass - 1], loc, 0, cnt_new - 1); cnt = 0; local_index = new_counter[pass - 1]; Pext_pass[pass][loc[0]] = local_index; for (i = 1; i < cnt_new; i++) { if (new_elmts[pass - 1][i] > new_elmts[pass - 1][cnt]) { new_elmts[pass - 1][++cnt] = new_elmts[pass - 1][i]; local_index++; } Pext_pass[pass][loc[i]] = local_index; } new_counter[pass] = local_index + 1; } else if (num_procs > 1) { new_counter[pass] = new_counter[pass - 1]; } if (new_num_cols_offd < local_index + 1) { new_num_cols_offd = local_index + 1; } pass_length = pass_pointer[pass + 1] - pass_pointer[pass]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,my_thread_num,num_threads,thread_start,thread_stop,cnt_nz,cnt_nz_offd,i1,j,j1,j_start,j_end,k1,k,P_marker,P_marker_offd) #endif { /* Thread by computing the sparsity structure for this pass only over * each thread's range of rows. Rows are divided up evenly amongst * the threads. The necessary thread-wise temporary arrays, like * P_marker, are initialized and de-allocated internally to the * parallel region. */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = (pass_length / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { thread_stop = pass_length; } else { thread_stop = (pass_length / num_threads) * (my_thread_num + 1); } thread_start += pass_pointer[pass]; thread_stop += pass_pointer[pass]; /* Local initializations */ cnt_nz = 0; cnt_nz_offd = 0; /* This block of code is to go to the top of the parallel region starting before * the loop over num_passes. 
*/ P_marker = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); /* marks points to see if they're counted */ for (i = 0; i < n_coarse; i++) { P_marker[i] = -1; } if (new_num_cols_offd == local_index + 1) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); for (i = 0; i < new_num_cols_offd; i++) { P_marker_offd[i] = -1; } } else if (n_coarse_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST); for (i = 0; i < n_coarse_offd; i++) { P_marker_offd[i] = -1; } } /* Need some variables to store each threads cnt_nz and cnt_nz_offd, and * then stitch things together as in par_interp.c * This loop writes * P_diag_i, P_offd_i: data parallel here, and require no special treatment * P_diag_start, P_offd_start: are not data parallel, require special treatment */ for (i = thread_start; i < thread_stop; i++) { i1 = pass_array[i]; P_diag_start[i1] = cnt_nz; P_offd_start[i1] = cnt_nz_offd; for (j = S_diag_i[i1]; j < S_diag_i[i1 + 1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass - 1) { j_start = P_diag_start[j1]; j_end = j_start + P_diag_i[j1 + 1]; for (k = j_start; k < j_end; k++) { k1 = P_diag_pass[pass - 1][k]; if (P_marker[k1] != i1) { cnt_nz++; P_diag_i[i1 + 1]++; P_marker[k1] = i1; } } j_start = P_offd_start[j1]; j_end = j_start + P_offd_i[j1 + 1]; for (k = j_start; k < j_end; k++) { k1 = P_offd_pass[pass - 1][k]; if (P_marker_offd[k1] != i1) { cnt_nz_offd++; P_offd_i[i1 + 1]++; P_marker_offd[k1] = i1; } } } } j_start = 0; for (j = S_offd_i[i1]; j < S_offd_i[i1 + 1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass - 1) { j_start = Pext_start[j1]; j_end = j_start + Pext_i[j1 + 1]; for (k = j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; if (k1 < 0) { if (P_marker[-k1 - 1] != i1) { cnt_nz++; P_diag_i[i1 + 1]++; P_marker[-k1 - 1] = i1; } } else if (P_marker_offd[k1] != i1) { cnt_nz_offd++; P_offd_i[i1 + 1]++; P_marker_offd[k1] = i1; } } } } } /* Update P_diag_start, P_offd_start with cumulative * nonzero counts over all threads */ if (my_thread_num == 0) { max_num_threads[0] = num_threads; } cnt_nz_offd_per_thread[my_thread_num] = cnt_nz_offd; cnt_nz_per_thread[my_thread_num] = cnt_nz; #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { for (i = 1; i < max_num_threads[0]; i++) { cnt_nz_offd_per_thread[i] += cnt_nz_offd_per_thread[i - 1]; cnt_nz_per_thread[i] += cnt_nz_per_thread[i - 1]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num > 0) { /* update this thread's section of P_diag_start and P_offd_start * with the num of nz's counted by previous threads */ for (i = thread_start; i < thread_stop; i++) { i1 = pass_array[i]; P_diag_start[i1] += cnt_nz_per_thread[my_thread_num - 1]; P_offd_start[i1] += cnt_nz_offd_per_thread[my_thread_num - 1]; } } else /* if my_thread_num == 0 */ { /* Grab the nz count for all threads */ cnt_nz = cnt_nz_per_thread[max_num_threads[0] - 1]; cnt_nz_offd = cnt_nz_offd_per_thread[max_num_threads[0] - 1]; /* Updated total nz count */ total_nz += cnt_nz; total_nz_offd += cnt_nz_offd; /* Allocate P_diag_pass and P_offd_pass for all threads */ P_diag_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz, HYPRE_MEMORY_HOST); if (cnt_nz_offd) { P_offd_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz_offd, HYPRE_MEMORY_HOST); } else if (num_procs > 1) { P_offd_pass[pass] = NULL; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* offset cnt_nz and cnt_nz_offd to point to the starting * point in P_diag_pass and P_offd_pass for each thread */ if 
(my_thread_num > 0) { cnt_nz = cnt_nz_per_thread[my_thread_num - 1]; cnt_nz_offd = cnt_nz_offd_per_thread[my_thread_num - 1]; } else { cnt_nz = 0; cnt_nz_offd = 0; } /* Set P_diag_pass and P_offd_pass */ for (i = thread_start; i < thread_stop; i++) { i1 = pass_array[i]; for (j = S_diag_i[i1]; j < S_diag_i[i1 + 1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass - 1) { j_start = P_diag_start[j1]; j_end = j_start + P_diag_i[j1 + 1]; for (k = j_start; k < j_end; k++) { k1 = P_diag_pass[pass - 1][k]; if (P_marker[k1] != -i1 - 1) { P_diag_pass[pass][cnt_nz++] = k1; P_marker[k1] = -i1 - 1; } } j_start = P_offd_start[j1]; j_end = j_start + P_offd_i[j1 + 1]; for (k = j_start; k < j_end; k++) { k1 = P_offd_pass[pass - 1][k]; if (P_marker_offd[k1] != -i1 - 1) { P_offd_pass[pass][cnt_nz_offd++] = k1; P_marker_offd[k1] = -i1 - 1; } } } } for (j = S_offd_i[i1]; j < S_offd_i[i1 + 1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass - 1) { j_start = Pext_start[j1]; j_end = j_start + Pext_i[j1 + 1]; for (k = j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; if (k1 < 0) { if (P_marker[-k1 - 1] != -i1 - 1) { P_diag_pass[pass][cnt_nz++] = -k1 - 1; P_marker[-k1 - 1] = -i1 - 1; } } else if (P_marker_offd[k1] != -i1 - 1) { P_offd_pass[pass][cnt_nz_offd++] = k1; P_marker_offd[k1] = -i1 - 1; } } } } } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); if ( (n_coarse_offd) || (new_num_cols_offd == local_index + 1) ) { hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } } /* End parallel region */ } hypre_TFree(loc, HYPRE_MEMORY_HOST); hypre_TFree(P_ncols, HYPRE_MEMORY_HOST); hypre_TFree(Pext_send_buffer, HYPRE_MEMORY_HOST); hypre_TFree(big_temp_pass, HYPRE_MEMORY_HOST); hypre_TFree(new_recv_vec_start, HYPRE_MEMORY_HOST); hypre_TFree(cnt_nz_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(cnt_nz_offd_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, total_nz, HYPRE_MEMORY_DEVICE); P_diag_data = hypre_CTAlloc(HYPRE_Real, total_nz, HYPRE_MEMORY_DEVICE); if (total_nz_offd) { P_offd_j = hypre_CTAlloc(HYPRE_Int, total_nz_offd, HYPRE_MEMORY_DEVICE); P_offd_data = hypre_CTAlloc(HYPRE_Real, total_nz_offd, HYPRE_MEMORY_DEVICE); } for (i = 0; i < n_fine; i++) { P_diag_i[i + 1] += P_diag_i[i]; P_offd_i[i + 1] += P_offd_i[i]; } /* determine P for coarse points */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,i1) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_coarse; i++) { i1 = C_array[i]; P_diag_j[P_diag_i[i1]] = fine_to_coarse[i1]; P_diag_data[P_diag_i[i1]] = 1.0; } if (weight_option) /*if this is set, weights are separated into negative and positive offdiagonals and accumulated accordingly */ { pass_length = pass_pointer[2] - pass_pointer[1]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_pos,sum_C_neg,sum_N_pos,sum_N_neg,j_start,j_end,j,k1,cnt,j1,cnt_offd,diagonal,alfa,beta) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for pass one. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. 
*/ P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } if (num_cols_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_offd; i++) { P_marker_offd[i] = -1; } } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[1] + (pass_length / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { thread_stop = pass_pointer[1] + pass_length; } else { thread_stop = pass_pointer[1] + (pass_length / num_threads) * (my_thread_num + 1); } /* determine P for points of pass 1, i.e. neighbors of coarse points */ for (i = thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C_pos = 0; sum_C_neg = 0; sum_N_pos = 0; sum_N_neg = 0; j_start = P_diag_start[i1]; j_end = j_start + P_diag_i[i1 + 1] - P_diag_i[i1]; for (j = j_start; j < j_end; j++) { k1 = P_diag_pass[1][j]; P_marker[C_array[k1]] = i1; } cnt = P_diag_i[i1]; for (j = A_diag_i[i1] + 1; j < A_diag_i[i1 + 1]; j++) { j1 = A_diag_j[j]; if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) { if (A_diag_data[j] < 0) { sum_N_neg += A_diag_data[j]; } else { sum_N_pos += A_diag_data[j]; } } if (j1 != -1 && P_marker[j1] == i1) { P_diag_data[cnt] = A_diag_data[j]; P_diag_j[cnt++] = fine_to_coarse[j1]; if (A_diag_data[j] < 0) { sum_C_neg += A_diag_data[j]; } else { sum_C_pos += A_diag_data[j]; } } } j_start = P_offd_start[i1]; j_end = j_start + P_offd_i[i1 + 1] - P_offd_i[i1]; for (j = j_start; j < j_end; j++) { k1 = P_offd_pass[1][j]; P_marker_offd[C_array_offd[k1]] = i1; } cnt_offd = P_offd_i[i1]; for (j = A_offd_i[i1]; j < A_offd_i[i1 + 1]; j++) { j1 = A_offd_j[j]; if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func_offd[j1])) { if (A_offd_data[j] < 0) { sum_N_neg += A_offd_data[j]; } else { sum_N_pos += A_offd_data[j]; } } if (j1 != -1 && P_marker_offd[j1] == i1) { P_offd_data[cnt_offd] = A_offd_data[j]; P_offd_j[cnt_offd++] = map_S_to_new[j1]; if (A_offd_data[j] < 0) { sum_C_neg += A_offd_data[j]; } else { sum_C_pos += A_offd_data[j]; } } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C_neg * diagonal != 0) { alfa = -sum_N_neg / (sum_C_neg * diagonal); } if (sum_C_pos * diagonal != 0) { beta = -sum_N_pos / (sum_C_pos * diagonal); } for (j = P_diag_i[i1]; j < cnt; j++) if (P_diag_data[j] < 0) { P_diag_data[j] *= alfa; } else { P_diag_data[j] *= beta; } for (j = P_offd_i[i1]; j < cnt_offd; j++) if (P_offd_data[j] < 0) { P_offd_data[j] *= alfa; } else { P_offd_data[j] *= beta; } } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); if (num_cols_offd) { hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } } /* End Parallel Region */ old_Pext_send_size = 0; old_Pext_recv_size = 0; if (n_coarse) { hypre_TFree(C_array, HYPRE_MEMORY_HOST); } hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_diag_pass[1], HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_TFree(P_offd_pass[1], HYPRE_MEMORY_HOST); } for (pass = 2; pass < num_passes; pass++) { if (num_procs > 1) { Pext_send_size = Pext_send_map_start[pass][num_sends]; if (Pext_send_size > old_Pext_send_size) { hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST); Pext_send_data = hypre_CTAlloc(HYPRE_Real, Pext_send_size, HYPRE_MEMORY_HOST); } old_Pext_send_size = Pext_send_size; cnt_offd = 0; for (i = 0; i < num_sends; i++) { for (j = send_map_start[i]; j < send_map_start[i + 1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass - 1) 
{ j_start = P_diag_i[j1]; j_end = P_diag_i[j1 + 1]; for (k = j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_diag_data[k]; } j_start = P_offd_i[j1]; j_end = P_offd_i[j1 + 1]; for (k = j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_offd_data[k]; } } } } hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = Pext_send_map_start[pass]; hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = Pext_recv_vec_start[pass]; Pext_recv_size = Pext_recv_vec_start[pass][num_recvs]; if (Pext_recv_size > old_Pext_recv_size) { hypre_TFree(Pext_data, HYPRE_MEMORY_HOST); Pext_data = hypre_CTAlloc(HYPRE_Real, Pext_recv_size, HYPRE_MEMORY_HOST); } old_Pext_recv_size = Pext_recv_size; comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg, Pext_send_data, Pext_data); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(Pext_send_map_start[pass], HYPRE_MEMORY_HOST); hypre_TFree(Pext_recv_vec_start[pass], HYPRE_MEMORY_HOST); } pass_length = pass_pointer[pass + 1] - pass_pointer[pass]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_neg,sum_C_pos,sum_N_neg,sum_N_pos,j_start,j_end,cnt,j,k1,cnt_offd,j1,k,alfa,beta,diagonal,C_array,C_array_offd) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for passes >= 2. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. */ P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } if (num_cols_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_offd; i++) { P_marker_offd[i] = -1; } } C_array = NULL; C_array_offd = NULL; if (n_coarse) { C_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); } if (new_num_cols_offd > n_coarse_offd) { C_array_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); } else if (n_coarse_offd) { C_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST); } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[pass] + (pass_length / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { thread_stop = pass_pointer[pass] + pass_length; } else { thread_stop = pass_pointer[pass] + (pass_length / num_threads) * (my_thread_num + 1); } /* Loop over each thread's row-range */ for (i = thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C_neg = 0; sum_C_pos = 0; sum_N_neg = 0; sum_N_pos = 0; j_start = P_diag_start[i1]; j_end = j_start + P_diag_i[i1 + 1] - P_diag_i[i1]; cnt = P_diag_i[i1]; for (j = j_start; j < j_end; j++) { k1 = P_diag_pass[pass][j]; C_array[k1] = cnt; P_diag_data[cnt] = 0; P_diag_j[cnt++] = k1; } j_start = P_offd_start[i1]; j_end = j_start + P_offd_i[i1 + 1] - P_offd_i[i1]; cnt_offd = P_offd_i[i1]; for (j = j_start; j < j_end; j++) { k1 = P_offd_pass[pass][j]; C_array_offd[k1] = cnt_offd; P_offd_data[cnt_offd] = 0; P_offd_j[cnt_offd++] = k1; } for (j = S_diag_i[i1]; j < S_diag_i[i1 + 1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass - 1) { P_marker[j1] = i1; } } for (j = S_offd_i[i1]; j < S_offd_i[i1 + 1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass - 1) { P_marker_offd[j1] = i1; } } for (j = A_diag_i[i1] + 1; j < 
A_diag_i[i1 + 1]; j++) { j1 = A_diag_j[j]; if (P_marker[j1] == i1) { for (k = P_diag_i[j1]; k < P_diag_i[j1 + 1]; k++) { k1 = P_diag_j[k]; alfa = A_diag_data[j] * P_diag_data[k]; P_diag_data[C_array[k1]] += alfa; if (alfa < 0) { sum_C_neg += alfa; sum_N_neg += alfa; } else { sum_C_pos += alfa; sum_N_pos += alfa; } } for (k = P_offd_i[j1]; k < P_offd_i[j1 + 1]; k++) { k1 = P_offd_j[k]; alfa = A_diag_data[j] * P_offd_data[k]; P_offd_data[C_array_offd[k1]] += alfa; if (alfa < 0) { sum_C_neg += alfa; sum_N_neg += alfa; } else { sum_C_pos += alfa; sum_N_pos += alfa; } } } else { if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) { if (A_diag_data[j] < 0) { sum_N_neg += A_diag_data[j]; } else { sum_N_pos += A_diag_data[j]; } } } } for (j = A_offd_i[i1]; j < A_offd_i[i1 + 1]; j++) { j1 = A_offd_j[j]; if (j1 > -1 && P_marker_offd[j1] == i1) { j_start = Pext_start[j1]; j_end = j_start + Pext_i[j1 + 1]; for (k = j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; alfa = A_offd_data[j] * Pext_data[k]; if (k1 < 0) { P_diag_data[C_array[-k1 - 1]] += alfa; } else { P_offd_data[C_array_offd[k1]] += alfa; } if (alfa < 0) { sum_C_neg += alfa; sum_N_neg += alfa; } else { sum_C_pos += alfa; sum_N_pos += alfa; } } } else { if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func_offd[j1] == dof_func[i1])) { if ( A_offd_data[j] < 0) { sum_N_neg += A_offd_data[j]; } else { sum_N_pos += A_offd_data[j]; } } } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C_neg * diagonal != 0) { alfa = -sum_N_neg / (sum_C_neg * diagonal); } if (sum_C_pos * diagonal != 0) { beta = -sum_N_pos / (sum_C_pos * diagonal); } for (j = P_diag_i[i1]; j < P_diag_i[i1 + 1]; j++) if (P_diag_data[j] < 0) { P_diag_data[j] *= alfa; } else { P_diag_data[j] *= beta; } for (j = P_offd_i[i1]; j < P_offd_i[i1 + 1]; j++) if (P_offd_data[j] < 0) { P_offd_data[j] *= alfa; } else { P_offd_data[j] *= beta; } } hypre_TFree(C_array, HYPRE_MEMORY_HOST); hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); if (num_cols_offd) { hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } } /* End OMP Parallel Section */ hypre_TFree(P_diag_pass[pass], HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_TFree(P_offd_pass[pass], HYPRE_MEMORY_HOST); hypre_TFree(Pext_pass[pass], HYPRE_MEMORY_HOST); } } /* End num_passes for-loop */ } else /* no distinction between positive and negative offdiagonal element */ { pass_length = pass_pointer[2] - pass_pointer[1]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for pass one. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. 
*/ /* Initialize thread-wise variables */ tmp_marker = NULL; if (n_fine) { tmp_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } tmp_marker_offd = NULL; if (num_cols_offd) { tmp_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } for (i = 0; i < n_fine; i++) { tmp_marker[i] = -1; } for (i = 0; i < num_cols_offd; i++) { tmp_marker_offd[i] = -1; } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[1] + (pass_length / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { thread_stop = pass_pointer[1] + pass_length; } else { thread_stop = pass_pointer[1] + (pass_length / num_threads) * (my_thread_num + 1); } /* determine P for points of pass 1, i.e. neighbors of coarse points */ for (i = thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C = 0; sum_N = 0; j_start = P_diag_start[i1]; j_end = j_start + P_diag_i[i1 + 1] - P_diag_i[i1]; for (j = j_start; j < j_end; j++) { k1 = P_diag_pass[1][j]; tmp_marker[C_array[k1]] = i1; } cnt = P_diag_i[i1]; for (j = A_diag_i[i1] + 1; j < A_diag_i[i1 + 1]; j++) { j1 = A_diag_j[j]; if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) { sum_N += A_diag_data[j]; } if (j1 != -1 && tmp_marker[j1] == i1) { P_diag_data[cnt] = A_diag_data[j]; P_diag_j[cnt++] = fine_to_coarse[j1]; sum_C += A_diag_data[j]; } } j_start = P_offd_start[i1]; j_end = j_start + P_offd_i[i1 + 1] - P_offd_i[i1]; for (j = j_start; j < j_end; j++) { k1 = P_offd_pass[1][j]; tmp_marker_offd[C_array_offd[k1]] = i1; } cnt_offd = P_offd_i[i1]; for (j = A_offd_i[i1]; j < A_offd_i[i1 + 1]; j++) { j1 = A_offd_j[j]; if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func_offd[j1])) { sum_N += A_offd_data[j]; } if (j1 != -1 && tmp_marker_offd[j1] == i1) { P_offd_data[cnt_offd] = A_offd_data[j]; P_offd_j[cnt_offd++] = map_S_to_new[j1]; sum_C += A_offd_data[j]; } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C * diagonal != 0) { alfa = -sum_N / (sum_C * diagonal); } for (j = P_diag_i[i1]; j < cnt; j++) { P_diag_data[j] *= alfa; } for (j = P_offd_i[i1]; j < cnt_offd; j++) { P_offd_data[j] *= alfa; } } hypre_TFree(tmp_marker, HYPRE_MEMORY_HOST); hypre_TFree(tmp_marker_offd, HYPRE_MEMORY_HOST); } /* end OMP parallel region */ old_Pext_send_size = 0; old_Pext_recv_size = 0; if (n_coarse) { hypre_TFree(C_array, HYPRE_MEMORY_HOST); } hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_diag_pass[1], HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_TFree(P_offd_pass[1], HYPRE_MEMORY_HOST); } for (pass = 2; pass < num_passes; pass++) { if (num_procs > 1) { Pext_send_size = Pext_send_map_start[pass][num_sends]; if (Pext_send_size > old_Pext_send_size) { hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST); Pext_send_data = hypre_CTAlloc(HYPRE_Real, Pext_send_size, HYPRE_MEMORY_HOST); } old_Pext_send_size = Pext_send_size; cnt_offd = 0; for (i = 0; i < num_sends; i++) { for (j = send_map_start[i]; j < send_map_start[i + 1]; j++) { j1 = send_map_elmt[j]; if (assigned[j1] == pass - 1) { j_start = P_diag_i[j1]; j_end = P_diag_i[j1 + 1]; for (k = j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_diag_data[k]; } j_start = P_offd_i[j1]; j_end = P_offd_i[j1 + 1]; for (k = j_start; k < j_end; k++) { Pext_send_data[cnt_offd++] = P_offd_data[k]; } } } } hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends; hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = Pext_send_map_start[pass]; 
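   /*-----------------------------------------------------------------------
    * Editorial note (illustrative only, not part of hypre): the pass-1
    * scaling applied above is the classical direct-interpolation weight.
    * With diagonal = a_ii, sum_C the sum of the entries a_ij copied into
    * row i of P (interpolatory neighbors) and sum_N the sum over all
    * admissible neighbors of row i, every stored entry is rescaled so the
    * dropped connections are lumped onto the kept ones:
    *
    *      w_ij = -(sum_N / (sum_C * a_ii)) * a_ij
    *
    * The weight_option branch earlier applies the same idea separately to
    * negative entries (factor alfa) and positive entries (factor beta).
    * Guarded sketch for a single row, assuming the sums were accumulated:
    *-----------------------------------------------------------------------*/
#if 0
   {
      HYPRE_Real scale = -sum_N / (sum_C * diagonal);  /* "alfa" in the code above */
      for (j = P_diag_i[i1]; j < P_diag_i[i1 + 1]; j++)
      {
         P_diag_data[j] *= scale;
      }
   }
#endif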
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs; hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = Pext_recv_vec_start[pass]; Pext_recv_size = Pext_recv_vec_start[pass][num_recvs]; if (Pext_recv_size > old_Pext_recv_size) { hypre_TFree(Pext_data, HYPRE_MEMORY_HOST); Pext_data = hypre_CTAlloc(HYPRE_Real, Pext_recv_size, HYPRE_MEMORY_HOST); } old_Pext_recv_size = Pext_recv_size; comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg, Pext_send_data, Pext_data); hypre_ParCSRCommHandleDestroy(comm_handle); hypre_TFree(Pext_send_map_start[pass], HYPRE_MEMORY_HOST); hypre_TFree(Pext_recv_vec_start[pass], HYPRE_MEMORY_HOST); } pass_length = pass_pointer[pass + 1] - pass_pointer[pass]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa,tmp_array,tmp_array_offd) #endif { /* Sparsity structure is now finished. Next, calculate interpolation * weights for passes >= 2. Thread by computing the interpolation * weights only over each thread's range of rows. Rows are divided * up evenly amongst the threads. */ /* Initialize thread-wise variables */ tmp_marker = NULL; if (n_fine) { tmp_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } tmp_marker_offd = NULL; if (num_cols_offd) { tmp_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } tmp_array = NULL; if (n_coarse) { tmp_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); } tmp_array_offd = NULL; if (new_num_cols_offd > n_coarse_offd) { tmp_array_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); } else { tmp_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST);} for (i = 0; i < n_fine; i++) { tmp_marker[i] = -1; } for (i = 0; i < num_cols_offd; i++) { tmp_marker_offd[i] = -1; } /* Compute this thread's range of pass_length */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); thread_start = pass_pointer[pass] + (pass_length / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { thread_stop = pass_pointer[pass] + pass_length; } else { thread_stop = pass_pointer[pass] + (pass_length / num_threads) * (my_thread_num + 1); } for (i = thread_start; i < thread_stop; i++) { i1 = pass_array[i]; sum_C = 0; sum_N = 0; j_start = P_diag_start[i1]; j_end = j_start + P_diag_i[i1 + 1] - P_diag_i[i1]; cnt = P_diag_i[i1]; for (j = j_start; j < j_end; j++) { k1 = P_diag_pass[pass][j]; tmp_array[k1] = cnt; P_diag_data[cnt] = 0; P_diag_j[cnt++] = k1; } j_start = P_offd_start[i1]; j_end = j_start + P_offd_i[i1 + 1] - P_offd_i[i1]; cnt_offd = P_offd_i[i1]; for (j = j_start; j < j_end; j++) { k1 = P_offd_pass[pass][j]; tmp_array_offd[k1] = cnt_offd; P_offd_data[cnt_offd] = 0; P_offd_j[cnt_offd++] = k1; } for (j = S_diag_i[i1]; j < S_diag_i[i1 + 1]; j++) { j1 = S_diag_j[j]; if (assigned[j1] == pass - 1) { tmp_marker[j1] = i1; } } for (j = S_offd_i[i1]; j < S_offd_i[i1 + 1]; j++) { j1 = S_offd_j[j]; if (assigned_offd[j1] == pass - 1) { tmp_marker_offd[j1] = i1; } } for (j = A_diag_i[i1] + 1; j < A_diag_i[i1 + 1]; j++) { j1 = A_diag_j[j]; if (tmp_marker[j1] == i1) { for (k = P_diag_i[j1]; k < P_diag_i[j1 + 1]; k++) { k1 = P_diag_j[k]; alfa = A_diag_data[j] * P_diag_data[k]; P_diag_data[tmp_array[k1]] += alfa; sum_C += alfa; sum_N += alfa; } for (k = P_offd_i[j1]; k < P_offd_i[j1 + 1]; k++) { k1 = P_offd_j[k]; alfa = A_diag_data[j] * P_offd_data[k]; P_offd_data[tmp_array_offd[k1]] += alfa; sum_C 
+= alfa; sum_N += alfa; } } else { if (CF_marker[j1] != -3 && (num_functions == 1 || dof_func[i1] == dof_func[j1])) { sum_N += A_diag_data[j]; } } } for (j = A_offd_i[i1]; j < A_offd_i[i1 + 1]; j++) { j1 = A_offd_j[j]; if (j1 > -1 && tmp_marker_offd[j1] == i1) { j_start = Pext_start[j1]; j_end = j_start + Pext_i[j1 + 1]; for (k = j_start; k < j_end; k++) { k1 = Pext_pass[pass][k]; alfa = A_offd_data[j] * Pext_data[k]; if (k1 < 0) { P_diag_data[tmp_array[-k1 - 1]] += alfa; } else { P_offd_data[tmp_array_offd[k1]] += alfa; } sum_C += alfa; sum_N += alfa; } } else { if (CF_marker_offd[j1] != -3 && (num_functions == 1 || dof_func_offd[j1] == dof_func[i1])) { sum_N += A_offd_data[j]; } } } diagonal = A_diag_data[A_diag_i[i1]]; if (sum_C * diagonal != 0.0) { alfa = -sum_N / (sum_C * diagonal); } for (j = P_diag_i[i1]; j < P_diag_i[i1 + 1]; j++) { P_diag_data[j] *= alfa; } for (j = P_offd_i[i1]; j < P_offd_i[i1 + 1]; j++) { P_offd_data[j] *= alfa; } } hypre_TFree(tmp_marker, HYPRE_MEMORY_HOST); hypre_TFree(tmp_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_array, HYPRE_MEMORY_HOST); hypre_TFree(tmp_array_offd, HYPRE_MEMORY_HOST); } /* End OMP Parallel Section */ hypre_TFree(P_diag_pass[pass], HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_TFree(P_offd_pass[pass], HYPRE_MEMORY_HOST); hypre_TFree(Pext_pass[pass], HYPRE_MEMORY_HOST); } } } hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(Pext_send_map_start, HYPRE_MEMORY_HOST); hypre_TFree(Pext_recv_vec_start, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST); hypre_TFree(Pext_data, HYPRE_MEMORY_HOST); hypre_TFree(P_diag_pass, HYPRE_MEMORY_HOST); hypre_TFree(P_offd_pass, HYPRE_MEMORY_HOST); hypre_TFree(Pext_pass, HYPRE_MEMORY_HOST); hypre_TFree(P_diag_start, HYPRE_MEMORY_HOST); hypre_TFree(P_offd_start, HYPRE_MEMORY_HOST); hypre_TFree(Pext_start, HYPRE_MEMORY_HOST); hypre_TFree(Pext_i, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(assigned, HYPRE_MEMORY_HOST); hypre_TFree(assigned_offd, HYPRE_MEMORY_HOST); hypre_TFree(pass_pointer, HYPRE_MEMORY_HOST); hypre_TFree(pass_array, HYPRE_MEMORY_HOST); hypre_TFree(map_S_to_new, HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; /* Compress P, removing coefficients smaller than trunc_factor * Max and/or keep yat most <P_max_elmts> per row absolutely maximal coefficients */ if (trunc_factor != 0.0 || P_max_elmts != 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, P_max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); } P_offd_size = P_offd_i[n_fine]; num_cols_offd_P = 0; if (P_offd_size) { if (new_num_cols_offd > num_cols_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); } else { P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } 
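   /*-----------------------------------------------------------------------
    * Editorial note (illustrative only, not part of hypre): bookkeeping
    * used by the off-diagonal column compression that follows.  During the
    * passes, every newly encountered off-processor coarse column was
    * appended to new_elmts[l] (one list per pass level l), so a local
    * column index k with new_counter[l] <= k < new_counter[l+1] maps to
    * the global id new_elmts[l][k - new_counter[l]].  The code below marks
    * which of these indices actually appear in P_offd_j, gathers their
    * global ids into the sorted col_map_offd_P, and remaps P_offd_j
    * through big_permute / permute.  Guarded lookup sketch:
    *-----------------------------------------------------------------------*/
#if 0
   {
      HYPRE_Int    k = P_offd_j[0];      /* a local (pre-compression) column index */
      HYPRE_Int    l = 0;
      HYPRE_BigInt global_id;
      while (k >= new_counter[l + 1]) { l++; }        /* find its pass level   */
      global_id = new_elmts[l][k - new_counter[l]];   /* recover the global id */
   }
#endif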
#ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < new_num_cols_offd; i++) { P_marker_offd[i] = 0; } num_cols_offd_P = 0; for (i = 0; i < P_offd_size; i++) { index = P_offd_j[i]; if (!P_marker_offd[index]) { num_cols_offd_P++; P_marker_offd[index] = 1; } } col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_P, HYPRE_MEMORY_HOST); permute = hypre_CTAlloc(HYPRE_Int, new_counter[num_passes - 1], HYPRE_MEMORY_HOST); big_permute = hypre_CTAlloc(HYPRE_BigInt, new_counter[num_passes - 1], HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < new_counter[num_passes - 1]; i++) { big_permute[i] = -1; } cnt = 0; for (i = 0; i < num_passes - 1; i++) { for (j = new_counter[i]; j < new_counter[i + 1]; j++) { if (P_marker_offd[j]) { col_map_offd_P[cnt] = new_elmts[i][j - (HYPRE_BigInt)new_counter[i]]; big_permute[j] = col_map_offd_P[cnt++]; } } } hypre_BigQsort0(col_map_offd_P, 0, num_cols_offd_P - 1); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,big_k1) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < new_counter[num_passes - 1]; i++) { big_k1 = big_permute[i]; if (big_k1 != -1) { permute[i] = hypre_BigBinarySearch(col_map_offd_P, big_k1, num_cols_offd_P); } } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < P_offd_size; i++) { P_offd_j[i] = permute[P_offd_j[i]]; } hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } if (num_procs > 1) { for (i = 0; i < num_passes - 1; i++) { hypre_TFree(new_elmts[i], HYPRE_MEMORY_HOST); } } hypre_TFree(permute, HYPRE_MEMORY_HOST); hypre_TFree(big_permute, HYPRE_MEMORY_HOST); hypre_TFree(new_elmts, HYPRE_MEMORY_HOST); hypre_TFree(new_counter, HYPRE_MEMORY_HOST); if (num_cols_offd_P) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_offd_P; } if (n_SF) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) if (CF_marker[i] == -3) { CF_marker[i] = -1; } } if (num_procs > 1) { hypre_MatvecCommPkgCreate(P); } *P_ptr = P; /* wall_time = hypre_MPI_Wtime() - wall_time; hypre_printf("TOTAL TIME %1.2e \n",wall_time); */ /*----------------------------------------------------------------------- * Build and return dof_func array for coarse grid. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Free mapping vector and marker array. 
*-----------------------------------------------------------------------*/ #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_MULTIPASS_INTERP] += hypre_MPI_Wtime(); #endif return (0); } HYPRE_Int hypre_BoomerAMGBuildMultipass( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int P_max_elmts, HYPRE_Int weight_option, hypre_ParCSRMatrix **P_ptr ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPushRange("MultipassInterp"); #endif HYPRE_Int ierr = 0; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A), hypre_ParCSRMatrixMemoryLocation(S) ); if (exec == HYPRE_EXEC_DEVICE) { /* Notice: call the mod version on GPUs */ ierr = hypre_BoomerAMGBuildModMultipassDevice( A, CF_marker, S, num_cpts_global, trunc_factor, P_max_elmts, 9, num_functions, dof_func, P_ptr ); } else #endif { ierr = hypre_BoomerAMGBuildMultipassHost( A, CF_marker, S, num_cpts_global, num_functions, dof_func, debug_flag, trunc_factor, P_max_elmts, weight_option, P_ptr ); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_GpuProfilingPopRange(); #endif return ierr; }
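/*-----------------------------------------------------------------------
 * Editorial appendix (illustrative sketch, not part of hypre): the
 * threaded sparsity counting in the routine above follows a common
 * two-phase pattern -- each thread first counts the nonzeros of its row
 * range, a prefix sum over the per-thread counts then gives every
 * thread a private write offset, and the fill phase proceeds without
 * further synchronization.  A minimal standalone version of the offset
 * computation, with hypothetical names and plain C types:
 *-----------------------------------------------------------------------*/
static void editorial_two_phase_offsets(int nthreads,
                                        const int *count_per_thread,
                                        int *offset_per_thread,
                                        int *total)
{
   /* exclusive prefix sum: thread t writes starting at offset_per_thread[t] */
   int t, running = 0;
   for (t = 0; t < nthreads; t++)
   {
      offset_per_thread[t] = running;
      running += count_per_thread[t];
   }
   *total = running;   /* size to allocate for the shared output arrays */
}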
GB_unop__minv_int32_int32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__minv_int32_int32) // op(A') function: GB (_unop_tran__minv_int32_int32) // C type: int32_t // A type: int32_t // cast: int32_t cij = aij // unaryop: cij = GB_IMINV_SIGNED (aij, 32) #define GB_ATYPE \ int32_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 32) ; // casting #define GB_CAST(z, aij) \ int32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int32_t z = aij ; \ Cx [pC] = GB_IMINV_SIGNED (z, 32) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__minv_int32_int32) ( int32_t *Cx, // Cx and Ax may be aliased const int32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; int32_t z = aij ; Cx [p] = GB_IMINV_SIGNED (z, 32) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int32_t aij = Ax [p] ; int32_t z = aij ; Cx [p] = GB_IMINV_SIGNED (z, 32) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__minv_int32_int32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
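//------------------------------------------------------------------------------
// Editorial note (illustrative, not part of SuiteSparse:GraphBLAS): the worker
// above is one of the auto-generated kernels selected internally when a MINV
// unary operator is applied to an int32 matrix.  A user-level call that would
// typically reach it is sketched below; error checking is omitted and C is
// assumed to already exist with type GrB_INT32 and the same dimensions as A.
//------------------------------------------------------------------------------
#if 0
#include "GraphBLAS.h"
void editorial_apply_minv_int32 (GrB_Matrix C, GrB_Matrix A)
{
    // C = minv (A), entrywise integer multiplicative inverse
    GrB_Matrix_apply (C, NULL, NULL, GrB_MINV_INT32, A, NULL) ;
}
#endif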
nodal_residualbased_elimination_builder_and_solver_for_FSI.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi, Alessandro Franci // // #if !defined(KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI ) #define KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI /* System includes */ #include <set> #ifdef _OPENMP #include <omp.h> #endif /* External includes */ // #define USE_GOOGLE_HASH #ifdef USE_GOOGLE_HASH #include "sparsehash/dense_hash_set" //included in external libraries #else #include <unordered_set> #endif /* Project includes */ #include "utilities/timer.h" #include "includes/define.h" #include "includes/key_hash.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "includes/model_part.h" #include "pfem_fluid_dynamics_application_variables.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class NodalResidualBasedEliminationBuilderAndSolverForFSI * @ingroup KratosCore * @brief Current class provides an implementation for standard builder and solving operations. * @details The RHS is constituted by the unbalanced loads (residual) * Degrees of freedom are reordered putting the restrained degrees of freedom at * the end of the system ordered in reverse order with respect to the DofSet. * Imposition of the dirichlet conditions is naturally dealt with as the residual already contains * this information. * Calculation of the reactions involves a cost very similiar to the calculation of the total residual * @author Riccardo Rossi */ template<class TSparseSpace, class TDenseSpace, //= DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class NodalResidualBasedEliminationBuilderAndSolverForFSI : public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver > { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedEliminationBuilderAndSolverForFSI); typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef Node<3> NodeType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; typedef typename BaseType::ElementsContainerType ElementsContainerType; typedef Vector VectorType; typedef GlobalPointersVector<Node<3> > NodeWeakPtrVectorType; ///@} ///@name Life Cycle ///@{ /** Constructor. 
*/ NodalResidualBasedEliminationBuilderAndSolverForFSI( typename TLinearSolver::Pointer pNewLinearSystemSolver) : BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >(pNewLinearSystemSolver) { // KRATOS_INFO("NodalResidualBasedEliminationBuilderAndSolverForFSI") << "Using the standard builder and solver " << std::endl; } /** Destructor. */ ~NodalResidualBasedEliminationBuilderAndSolverForFSI() override { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ void SetMaterialPropertiesToFluid( ModelPart::NodeIterator itNode, double & density, double & deviatoricCoeff, double & volumetricCoeff, double timeInterval, double nodalVolume) { density =itNode->FastGetSolutionStepValue(DENSITY); deviatoricCoeff =itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY); double yieldShear=itNode->FastGetSolutionStepValue(YIELD_SHEAR); if(yieldShear>0){ double adaptiveExponent=itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); double equivalentStrainRate=itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE); double exponent=-adaptiveExponent*equivalentStrainRate; if(equivalentStrainRate!=0){ deviatoricCoeff+=(yieldShear/equivalentStrainRate)*(1-exp(exponent)); } if(equivalentStrainRate<0.00001 && yieldShear!=0 && adaptiveExponent!=0) { // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff=adaptiveExponent*yieldShear; } } volumetricCoeff=timeInterval*itNode->FastGetSolutionStepValue(BULK_MODULUS); if(volumetricCoeff>0) { volumetricCoeff=timeInterval*itNode->FastGetSolutionStepValue(BULK_MODULUS); double bulkReduction=density*nodalVolume/(timeInterval*volumetricCoeff); volumetricCoeff*=bulkReduction; } } void SetMaterialPropertiesToSolid( ModelPart::NodeIterator itNode, double & density, double & deviatoricCoeff, double & volumetricCoeff, double timeInterval, double nodalVolume) { density =itNode->FastGetSolutionStepValue(SOLID_DENSITY); double youngModulus=itNode->FastGetSolutionStepValue(YOUNG_MODULUS); double poissonRatio=itNode->FastGetSolutionStepValue(POISSON_RATIO); deviatoricCoeff = timeInterval*youngModulus/(1.0+poissonRatio)*0.5; volumetricCoeff = timeInterval*poissonRatio*youngModulus/((1.0+poissonRatio)*(1.0-2.0*poissonRatio)) + 2.0*deviatoricCoeff/3.0; } void BuildSolidNodally( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& b) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" 
<< std::endl; //contributions to the system LocalSystemMatrixType solidLHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType solidRHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different terms Element::EquationIdVectorType solidEquationId; ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); const double timeInterval = CurrentProcessInfo[DELTA_TIME]; const double FourThirds = 4.0 / 3.0; const double nTwoThirds = -2.0 / 3.0; double theta=0.5; array_1d<double,3> Acc(3,0.0); // array_1d<double,6> Sigma(6,0.0); double dNdXi=0; double dNdYi=0; double dNdZi=0; double dNdXj=0; double dNdYj=0; double dNdZj=0; unsigned int firstRow=0; unsigned int firstCol=0; double density=0; double deviatoricCoeff=0; double volumetricCoeff=0; /* #pragma omp parallel */ // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { if(itNode->Is(SOLID)){ NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER); // const unsigned int neighSize = neighb_nodes.size()+1; const unsigned int neighSize = solidNodalSFDneighboursId.size(); const double nodalVolume=itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME); if(neighSize>1 && nodalVolume>0) { const unsigned int localSize = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS).size(); if (solidLHS_Contribution.size1() != localSize) solidLHS_Contribution.resize(localSize, localSize, false); //false says not to preserve existing storage!! if (solidRHS_Contribution.size() != localSize) solidRHS_Contribution.resize(localSize, false); //false says not to preserve existing storage!! 
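                // Editorial note (illustrative, not part of Kratos): the apparent viscosity
                // assembled by SetMaterialPropertiesToFluid above follows the Papanastasiou
                // regularisation of a Bingham fluid,
                //     mu_app = mu + (tau_y / gamma_dot) * (1 - exp(-m * gamma_dot)),
                // whose regularised yield term tends to m*tau_y as gamma_dot -> 0; the
                // small-strain-rate branch of that function falls back to exactly that value.
                // Guarded standalone sketch of the formula (hypothetical helper, plain C):
#if 0
#include <math.h>
static double editorial_papanastasiou_viscosity(double mu, double tau_y, double m, double gamma_dot)
{
    if (gamma_dot < 1.0e-5)
        return m * tau_y;                                        // small-rate limit used above
    return mu + (tau_y / gamma_dot) * (1.0 - exp(-m * gamma_dot)); // regularised Bingham law
}
#endif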
if (solidEquationId.size() != localSize) solidEquationId.resize(localSize, false); solidLHS_Contribution= ZeroMatrix(localSize,localSize); solidRHS_Contribution= ZeroVector(localSize); this->SetMaterialPropertiesToSolid(itNode,density,deviatoricCoeff,volumetricCoeff,timeInterval,nodalVolume); density=itNode->FastGetSolutionStepValue(SOLID_DENSITY); firstRow=0; firstCol=0; if(dimension==2) { //////////////////////////// LHS TERMS ////////////////////////////// solidLHS_Contribution(0,0)+=nodalVolume*density*2.0/timeInterval; solidLHS_Contribution(1,1)+=nodalVolume*density*2.0/timeInterval; //////////////////////////// RHS TERMS ////////////////////////////// //-------- DYNAMIC FORCES TERM -------// Acc=2.0*(itNode->FastGetSolutionStepValue(VELOCITY,0)-itNode->FastGetSolutionStepValue(VELOCITY,1))/timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION,0); solidRHS_Contribution[0]+=-nodalVolume*density*Acc[0]; solidRHS_Contribution[1]+=-nodalVolume*density*Acc[1]; //-------- EXTERNAL FORCES TERM -------// array_1d<double, 3 >& VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); // double posX= itNode->X(); // double posY= itNode->Y(); // double coeffX =(12.0-24.0*posY)*pow(posX,4); // coeffX += (-24.0+48.0*posY)*pow(posX,3); // coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2); // coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX; // coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3); // double coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3); // coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2); // coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX; // coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4); // RHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0]*coeffX; // RHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1]*coeffY; solidRHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0]; solidRHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1]; //-------- INTERNAL FORCES TERM -------// array_1d<double,3> Sigma(3,0.0); Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // } const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); solidEquationId[0]=itNode->GetDof(VELOCITY_X,xDofPos).EquationId(); solidEquationId[1]=itNode->GetDof(VELOCITY_Y,xDofPos+1).EquationId(); for (unsigned int i = 0; i< neighSize; i++) { dNdXi=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol+1]; solidRHS_Contribution[firstCol] += - nodalVolume * (dNdXi*Sigma[0] + dNdYi*Sigma[2]); solidRHS_Contribution[firstCol+1]+= - nodalVolume * (dNdYi*Sigma[1] + dNdXi*Sigma[2]); for (unsigned int j = 0; j< neighSize; j++) { dNdXj=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow]; dNdYj=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow+1]; solidLHS_Contribution(firstRow,firstCol) += nodalVolume * ( (FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + dNdYj * dNdYi * deviatoricCoeff )*theta; solidLHS_Contribution(firstRow,firstCol+1) += nodalVolume * ( (nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff )*theta; solidLHS_Contribution(firstRow+1,firstCol) += nodalVolume * ( (nTwoThirds * deviatoricCoeff + 
volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff )*theta; solidLHS_Contribution(firstRow+1,firstCol+1)+= nodalVolume * ( (FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + dNdXj * dNdXi * deviatoricCoeff )*theta; firstRow+=2; } firstRow=0; firstCol+=2; unsigned int indexNode=i+1; if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true && indexNode<neighSize){ unsigned int other_neigh_nodes_id=solidNodalSFDneighboursId[indexNode]; // std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl; for(unsigned int k = 0; k< neighb_nodes.size(); k++) { unsigned int neigh_nodes_id=neighb_nodes[k].Id(); // std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl; if(neigh_nodes_id==other_neigh_nodes_id){ solidEquationId[firstCol]=neighb_nodes[k].GetDof(VELOCITY_X,xDofPos).EquationId(); solidEquationId[firstCol+1]=neighb_nodes[k].GetDof(VELOCITY_Y,xDofPos+1).EquationId(); break; } } }else if(i<neighb_nodes.size()) { solidEquationId[firstCol]=neighb_nodes[i].GetDof(VELOCITY_X,xDofPos).EquationId(); solidEquationId[firstCol+1]=neighb_nodes[i].GetDof(VELOCITY_Y,xDofPos+1).EquationId(); } } /* std::cout << "LHS_Contribution = " << LHS_Contribution << std::endl; */ }else if(dimension==3) { //////////////////////////// LHS TERMS ////////////////////////////// solidLHS_Contribution(0,0)+=nodalVolume*density*2.0/timeInterval; solidLHS_Contribution(1,1)+=nodalVolume*density*2.0/timeInterval; solidLHS_Contribution(2,2)+=nodalVolume*density*2.0/timeInterval; //////////////////////////// RHS TERMS ////////////////////////////// //-------- DYNAMIC FORCES TERM -------// Acc=2.0*(itNode->FastGetSolutionStepValue(VELOCITY,0)-itNode->FastGetSolutionStepValue(VELOCITY,1))/timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION,0); solidRHS_Contribution[0]+=-nodalVolume*density*Acc[0]; solidRHS_Contribution[1]+=-nodalVolume*density*Acc[1]; solidRHS_Contribution[2]+=-nodalVolume*density*Acc[2]; //-------- EXTERNAL FORCES TERM -------// array_1d<double, 3 >& VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); solidRHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0]; solidRHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1]; solidRHS_Contribution[2]+=nodalVolume*density*VolumeAcceleration[2]; //-------- INTERNAL FORCES TERM -------// array_1d<double,3> Sigma(6,0.0); Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // } const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); solidEquationId[0]=itNode->GetDof(VELOCITY_X,xDofPos).EquationId(); solidEquationId[1]=itNode->GetDof(VELOCITY_Y,xDofPos+1).EquationId(); solidEquationId[2]=itNode->GetDof(VELOCITY_Z,xDofPos+2).EquationId(); for (unsigned int i = 0; i< neighSize; i++) { dNdXi=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol+1]; dNdZi=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol+2]; solidRHS_Contribution[firstCol] += -nodalVolume * (dNdXi*Sigma[0] + dNdYi*Sigma[3] + dNdZi*Sigma[4]); solidRHS_Contribution[firstCol+1]+= -nodalVolume * (dNdYi*Sigma[1] + dNdXi*Sigma[3] + dNdZi*Sigma[5]); solidRHS_Contribution[firstCol+2]+= -nodalVolume * (dNdZi*Sigma[2] + dNdXi*Sigma[4] + dNdYi*Sigma[5]); for (unsigned int j = 0; j< neighSize; j++) { 
dNdXj=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow]; dNdYj=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow+1]; dNdZj=itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow+2]; solidLHS_Contribution(firstRow,firstCol) += nodalVolume * ( (FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + (dNdYj * dNdYi + dNdZj * dNdZi )* deviatoricCoeff )*theta; solidLHS_Contribution(firstRow,firstCol+1) += nodalVolume * ( (nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff )*theta; solidLHS_Contribution(firstRow,firstCol+2) += nodalVolume * ( (nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdZi + dNdZj * dNdXi * deviatoricCoeff )*theta; solidLHS_Contribution(firstRow+1,firstCol) += nodalVolume * ( (nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff )*theta; solidLHS_Contribution(firstRow+1,firstCol+1)+= nodalVolume * ( (FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + (dNdXj * dNdXi + dNdZj * dNdZi )* deviatoricCoeff )*theta; solidLHS_Contribution(firstRow+1,firstCol+2)+= nodalVolume * ( (nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdZi + dNdZj * dNdYi * deviatoricCoeff )*theta; solidLHS_Contribution(firstRow+2,firstCol) += nodalVolume * ( (nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdXi + dNdXj * dNdZi * deviatoricCoeff )*theta; solidLHS_Contribution(firstRow+2,firstCol+1)+= nodalVolume * ( (nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdYi + dNdYj * dNdZi * deviatoricCoeff )*theta; solidLHS_Contribution(firstRow+2,firstCol+2)+= nodalVolume * ( (FourThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdZi + (dNdXj * dNdXi + dNdYj * dNdYi )* deviatoricCoeff )*theta; firstRow+=3; } firstRow=0; firstCol+=3; unsigned int indexNode=i+1; if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true && indexNode<neighSize){ unsigned int other_neigh_nodes_id=solidNodalSFDneighboursId[indexNode]; // std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl; for(unsigned int k = 0; k< neighb_nodes.size(); k++) { unsigned int neigh_nodes_id=neighb_nodes[k].Id(); // std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl; if(neigh_nodes_id==other_neigh_nodes_id){ solidEquationId[firstCol]=neighb_nodes[k].GetDof(VELOCITY_X,xDofPos).EquationId(); solidEquationId[firstCol+1]=neighb_nodes[k].GetDof(VELOCITY_Y,xDofPos+1).EquationId(); solidEquationId[firstCol+2]=neighb_nodes[k].GetDof(VELOCITY_Z,xDofPos+2).EquationId(); break; } } }else if(i<neighb_nodes.size()) { solidEquationId[firstCol]=neighb_nodes[i].GetDof(VELOCITY_X,xDofPos).EquationId(); solidEquationId[firstCol+1]=neighb_nodes[i].GetDof(VELOCITY_Y,xDofPos+1).EquationId(); solidEquationId[firstCol+2]=neighb_nodes[i].GetDof(VELOCITY_Z,xDofPos+2).EquationId(); } } } #ifdef _OPENMP Assemble(A, b, solidLHS_Contribution, solidRHS_Contribution, solidEquationId, mlock_array); #else Assemble(A, b, solidLHS_Contribution, solidRHS_Contribution, solidEquationId); #endif } } } // } KRATOS_CATCH("") } void BuildFluidNodally( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& b) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" 
<< std::endl; /* std::cout<<"Building LHS and RHS of Momentum Equation Nodally"<<std::endl; */ //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different terms Element::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); const double timeInterval = CurrentProcessInfo[DELTA_TIME]; const double FourThirds = 4.0 / 3.0; const double nTwoThirds = -2.0 / 3.0; double theta=0.5; array_1d<double,3> Acc(3,0.0); // array_1d<double,6> Sigma(6,0.0); double pressure=0; double dNdXi=0; double dNdYi=0; double dNdZi=0; double dNdXj=0; double dNdYj=0; double dNdZj=0; unsigned int firstRow=0; unsigned int firstCol=0; double density=0; double deviatoricCoeff=0; double volumetricCoeff=0; /* #pragma omp parallel */ // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { if((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true) { NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); // const unsigned int neighSize = neighb_nodes.size()+1; const unsigned int neighSize = nodalSFDneighboursId.size(); const double nodalVolume=itNode->FastGetSolutionStepValue(NODAL_VOLUME); if(neighSize>1 && nodalVolume>0) { const unsigned int localSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS).size(); if (LHS_Contribution.size1() != localSize) LHS_Contribution.resize(localSize, localSize, false); //false says not to preserve existing storage!! if (RHS_Contribution.size() != localSize) RHS_Contribution.resize(localSize, false); //false says not to preserve existing storage!! 
if (EquationId.size() != localSize) EquationId.resize(localSize, false); LHS_Contribution= ZeroMatrix(localSize,localSize); RHS_Contribution= ZeroVector(localSize); this->SetMaterialPropertiesToFluid(itNode,density,deviatoricCoeff,volumetricCoeff,timeInterval,nodalVolume); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // // std::cout<<"density,deviatoricCoeff,volumetricCoeff "<<density<<" "<<deviatoricCoeff<<" "<<volumetricCoeff<<std::endl; // std::cout<<"INTERFACE nodalVolume "<<nodalVolume<<std::endl; // }else{ // std::cout<<"nodalVolume "<<nodalVolume<<std::endl; // } firstRow=0; firstCol=0; if(dimension==2) { //////////////////////////// LHS TERMS ////////////////////////////// LHS_Contribution(0,0)+=nodalVolume*density*2.0/timeInterval; LHS_Contribution(1,1)+=nodalVolume*density*2.0/timeInterval; //////////////////////////// RHS TERMS ////////////////////////////// //-------- DYNAMIC FORCES TERM -------// Acc=2.0*(itNode->FastGetSolutionStepValue(VELOCITY,0)-itNode->FastGetSolutionStepValue(VELOCITY,1))/timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION,0); RHS_Contribution[0]+=-nodalVolume*density*Acc[0]; RHS_Contribution[1]+=-nodalVolume*density*Acc[1]; //-------- EXTERNAL FORCES TERM -------// array_1d<double, 3 >& VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); // double posX= itNode->X(); // double posY= itNode->Y(); // double coeffX =(12.0-24.0*posY)*pow(posX,4); // coeffX += (-24.0+48.0*posY)*pow(posX,3); // coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2); // coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX; // coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3); // double coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3); // coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2); // coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX; // coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4); // RHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0]*coeffX; // RHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1]*coeffY; RHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0]; RHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1]; //-------- INTERNAL FORCES TERM -------// array_1d<double,3> Sigma(3,0.0); Sigma=itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // } if(itNode->IsNot(SOLID) || itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true) { pressure=itNode->FastGetSolutionStepValue(PRESSURE,0)*theta+itNode->FastGetSolutionStepValue(PRESSURE,1)*(1-theta); Sigma[0]=itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure; Sigma[1]=itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure; } const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); EquationId[0]=itNode->GetDof(VELOCITY_X,xDofPos).EquationId(); EquationId[1]=itNode->GetDof(VELOCITY_Y,xDofPos+1).EquationId(); for (unsigned int i = 0; i< neighSize; i++) { dNdXi=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol+1]; RHS_Contribution[firstCol] += - nodalVolume * (dNdXi*Sigma[0] + dNdYi*Sigma[2]); RHS_Contribution[firstCol+1]+= - nodalVolume * (dNdYi*Sigma[1] + dNdXi*Sigma[2]); for (unsigned int j = 0; j< neighSize; j++) { 
dNdXj=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow]; dNdYj=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow+1]; LHS_Contribution(firstRow,firstCol) += nodalVolume * ( (FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + dNdYj * dNdYi * deviatoricCoeff )*theta; LHS_Contribution(firstRow,firstCol+1) += nodalVolume * ( (nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff )*theta; LHS_Contribution(firstRow+1,firstCol) += nodalVolume * ( (nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff )*theta; LHS_Contribution(firstRow+1,firstCol+1)+= nodalVolume * ( (FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + dNdXj * dNdXi * deviatoricCoeff )*theta; firstRow+=2; } firstRow=0; firstCol+=2; unsigned int indexNode=i+1; if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true && indexNode<neighSize){ unsigned int other_neigh_nodes_id=nodalSFDneighboursId[indexNode]; // std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl; for(unsigned int k = 0; k< neighb_nodes.size(); k++) { unsigned int neigh_nodes_id=neighb_nodes[k].Id(); // std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl; if(neigh_nodes_id==other_neigh_nodes_id){ EquationId[firstCol]=neighb_nodes[k].GetDof(VELOCITY_X,xDofPos).EquationId(); EquationId[firstCol+1]=neighb_nodes[k].GetDof(VELOCITY_Y,xDofPos+1).EquationId(); break; } } }else if(i<neighb_nodes.size()) { EquationId[firstCol]=neighb_nodes[i].GetDof(VELOCITY_X,xDofPos).EquationId(); EquationId[firstCol+1]=neighb_nodes[i].GetDof(VELOCITY_Y,xDofPos+1).EquationId(); } } /* std::cout << "LHS_Contribution = " << LHS_Contribution << std::endl; */ }else if(dimension==3) { //////////////////////////// LHS TERMS ////////////////////////////// LHS_Contribution(0,0)+=nodalVolume*density*2.0/timeInterval; LHS_Contribution(1,1)+=nodalVolume*density*2.0/timeInterval; LHS_Contribution(2,2)+=nodalVolume*density*2.0/timeInterval; //////////////////////////// RHS TERMS ////////////////////////////// //-------- DYNAMIC FORCES TERM -------// Acc=2.0*(itNode->FastGetSolutionStepValue(VELOCITY,0)-itNode->FastGetSolutionStepValue(VELOCITY,1))/timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION,0); RHS_Contribution[0]+=-nodalVolume*density*Acc[0]; RHS_Contribution[1]+=-nodalVolume*density*Acc[1]; RHS_Contribution[2]+=-nodalVolume*density*Acc[2]; //-------- EXTERNAL FORCES TERM -------// array_1d<double, 3 >& VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); RHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0]; RHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1]; RHS_Contribution[2]+=nodalVolume*density*VolumeAcceleration[2]; //-------- INTERNAL FORCES TERM -------// array_1d<double,3> Sigma(6,0.0); Sigma=itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // } if(itNode->IsNot(SOLID) || itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true) { pressure=itNode->FastGetSolutionStepValue(PRESSURE,0)*theta+itNode->FastGetSolutionStepValue(PRESSURE,1)*(1-theta); Sigma[0]=itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure; Sigma[1]=itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure; Sigma[2]=itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2] + 
pressure; } const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); EquationId[0]=itNode->GetDof(VELOCITY_X,xDofPos).EquationId(); EquationId[1]=itNode->GetDof(VELOCITY_Y,xDofPos+1).EquationId(); EquationId[2]=itNode->GetDof(VELOCITY_Z,xDofPos+2).EquationId(); for (unsigned int i = 0; i< neighSize; i++) { dNdXi=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol+1]; dNdZi=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol+2]; RHS_Contribution[firstCol] += -nodalVolume * (dNdXi*Sigma[0] + dNdYi*Sigma[3] + dNdZi*Sigma[4]); RHS_Contribution[firstCol+1]+= -nodalVolume * (dNdYi*Sigma[1] + dNdXi*Sigma[3] + dNdZi*Sigma[5]); RHS_Contribution[firstCol+2]+= -nodalVolume * (dNdZi*Sigma[2] + dNdXi*Sigma[4] + dNdYi*Sigma[5]); for (unsigned int j = 0; j< neighSize; j++) { dNdXj=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow]; dNdYj=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow+1]; dNdZj=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow+2]; LHS_Contribution(firstRow,firstCol) += nodalVolume * ( (FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + (dNdYj * dNdYi + dNdZj * dNdZi )* deviatoricCoeff )*theta; LHS_Contribution(firstRow,firstCol+1) += nodalVolume * ( (nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff )*theta; LHS_Contribution(firstRow,firstCol+2) += nodalVolume * ( (nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdZi + dNdZj * dNdXi * deviatoricCoeff )*theta; LHS_Contribution(firstRow+1,firstCol) += nodalVolume * ( (nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff )*theta; LHS_Contribution(firstRow+1,firstCol+1)+= nodalVolume * ( (FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + (dNdXj * dNdXi + dNdZj * dNdZi )* deviatoricCoeff )*theta; LHS_Contribution(firstRow+1,firstCol+2)+= nodalVolume * ( (nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdZi + dNdZj * dNdYi * deviatoricCoeff )*theta; LHS_Contribution(firstRow+2,firstCol) += nodalVolume * ( (nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdXi + dNdXj * dNdZi * deviatoricCoeff )*theta; LHS_Contribution(firstRow+2,firstCol+1)+= nodalVolume * ( (nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdYi + dNdYj * dNdZi * deviatoricCoeff )*theta; LHS_Contribution(firstRow+2,firstCol+2)+= nodalVolume * ( (FourThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdZi + (dNdXj * dNdXi + dNdYj * dNdYi )* deviatoricCoeff )*theta; firstRow+=3; } firstRow=0; firstCol+=3; unsigned int indexNode=i+1; if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true && indexNode<neighSize){ unsigned int other_neigh_nodes_id=nodalSFDneighboursId[indexNode]; // std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl; for(unsigned int k = 0; k< neighb_nodes.size(); k++) { unsigned int neigh_nodes_id=neighb_nodes[k].Id(); // std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl; if(neigh_nodes_id==other_neigh_nodes_id){ EquationId[firstCol]=neighb_nodes[k].GetDof(VELOCITY_X,xDofPos).EquationId(); EquationId[firstCol+1]=neighb_nodes[k].GetDof(VELOCITY_Y,xDofPos+1).EquationId(); EquationId[firstCol+2]=neighb_nodes[k].GetDof(VELOCITY_Z,xDofPos+2).EquationId(); break; } } }else if(i<neighb_nodes.size()) { EquationId[firstCol]=neighb_nodes[i].GetDof(VELOCITY_X,xDofPos).EquationId(); 
EquationId[firstCol+1]=neighb_nodes[i].GetDof(VELOCITY_Y,xDofPos+1).EquationId(); EquationId[firstCol+2]=neighb_nodes[i].GetDof(VELOCITY_Z,xDofPos+2).EquationId(); } } } #ifdef _OPENMP Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif } } } // } KRATOS_CATCH("") } /** * @brief This is a call to the linear system solver * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void SystemSolve( TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b ) override { KRATOS_TRY double norm_b; if (TSparseSpace::Size(b) != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if (norm_b != 0.00) { //do solve BaseType::mpLinearSystemSolver->Solve(A, Dx, b); } else TSparseSpace::SetToZero(Dx); // Prints informations about the current time KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl; KRATOS_CATCH("") } /** *@brief This is a call to the linear system solver (taking into account some physical particularities of the problem) * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector * @param rModelPart The model part of the problem to solve */ void SystemSolveWithPhysics( TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b, ModelPart& rModelPart ) { KRATOS_TRY double norm_b; if (TSparseSpace::Size(b) != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if (norm_b != 0.00) { //provide physical data as needed if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded() ) BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart); //do solve BaseType::mpLinearSystemSolver->Solve(A, Dx, b); } else { TSparseSpace::SetToZero(Dx); KRATOS_WARNING_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl; } // Prints informations about the current time KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl; KRATOS_CATCH("") } /** * @brief Function to perform the building and solving phase at the same time. 
* @details It is ideally the fastest and safer function to use when it is possible to solve * just after building * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void BuildAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { KRATOS_TRY Timer::Start("Build"); // boost::timer m_build_time; BuildSolidNodally(pScheme, rModelPart, A, b); BuildFluidNodally(pScheme, rModelPart, A, b); // std::cout << "MOMENTUM EQ: build_time : " << m_build_time.elapsed() << std::endl; Timer::Stop("Build"); // ApplyPointLoads(pScheme,rModelPart,b); // Does nothing...dirichlet conditions are naturally dealt with in defining the residual ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b); KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl; const double start_solve = OpenMPUtils::GetCurrentTime(); Timer::Start("Solve"); /* boost::timer m_solve_time; */ SystemSolveWithPhysics(A, Dx, b, rModelPart); /* std::cout << "MOMENTUM EQ: solve_time : " << m_solve_time.elapsed() << std::endl; */ Timer::Stop("Solve"); const double stop_solve = OpenMPUtils::GetCurrentTime(); KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >=1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl; KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl; KRATOS_CATCH("") } /** * @brief Builds the list of the DofSets involved in the problem by "asking" to each element * and condition its Dofs. 
* @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the * way the matrix and RHS are built * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart ) override { KRATOS_TRY; KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl; //Gets the array of elements from the modeler ElementsArrayType& pElements = rModelPart.Elements(); const int nelements = static_cast<int>(pElements.size()); Element::DofsVectorType ElementalDofList; ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); unsigned int nthreads = OpenMPUtils::GetNumThreads(); // typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type; // typedef std::unordered_set < NodeType::DofType::Pointer, // DofPointerHasher, // DofPointerComparor, // allocator_type > set_type; #ifdef USE_GOOGLE_HASH typedef google::dense_hash_set < NodeType::DofType::Pointer, DofPointerHasher> set_type; #else typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type; #endif // std::vector<set_type> dofs_aux_list(nthreads); // std::vector<allocator_type> allocators(nthreads); for (int i = 0; i < static_cast<int>(nthreads); i++) { #ifdef USE_GOOGLE_HASH dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer()); #else // dofs_aux_list[i] = set_type( allocators[i]); dofs_aux_list[i].reserve(nelements); #endif } #pragma omp parallel for firstprivate(nelements, ElementalDofList) for (int i = 0; i < static_cast<int>(nelements); i++) { typename ElementsArrayType::iterator it = pElements.begin() + i; const unsigned int this_thread_id = OpenMPUtils::ThisThread(); // gets list of Dof involved on every element pScheme->GetElementalDofList(*(it.base()), ElementalDofList, CurrentProcessInfo); dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end()); } ConditionsArrayType& pConditions = rModelPart.Conditions(); const int nconditions = static_cast<int>(pConditions.size()); #pragma omp parallel for firstprivate(nconditions, ElementalDofList) for (int i = 0; i < nconditions; i++) { typename ConditionsArrayType::iterator it = pConditions.begin() + i; const unsigned int this_thread_id = OpenMPUtils::ThisThread(); // gets list of Dof involved on every element pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo); dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end()); } //here we do a reduction in a tree so to have everything on thread 0 unsigned int old_max = nthreads; unsigned int new_max = ceil(0.5*static_cast<double>(old_max)); while (new_max >= 1 && new_max != old_max) { // //just for debugging // std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl; // for (int i = 0; i < new_max; i++) // { // if (i + new_max < old_max) // { // std::cout << i << " - " << i + new_max << std::endl; // } // } // std::cout << "********************" << std::endl; #pragma omp parallel for for (int i = 0; i < static_cast<int>(new_max); i++) { if (i + new_max < old_max) { dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end()); dofs_aux_list[i + new_max].clear(); } } old_max = new_max; new_max = ceil(0.5*static_cast<double>(old_max)); } DofsArrayType Doftemp; BaseType::mDofSet = DofsArrayType(); 
Doftemp.reserve(dofs_aux_list[0].size()); for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++) { Doftemp.push_back(it->get()); } Doftemp.Sort(); BaseType::mDofSet = Doftemp; // Throws an execption if there are no Degrees of freedom involved in the analysis KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl; BaseType::mDofSetIsInitialized = true; KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl; #ifdef _OPENMP if (mlock_array.size() != 0) { for (int i = 0; i < static_cast<int>(mlock_array.size()); i++) omp_destroy_lock(&mlock_array[i]); } mlock_array.resize(BaseType::mDofSet.size()); for (int i = 0; i < static_cast<int>(mlock_array.size()); i++) omp_init_lock(&mlock_array[i]); #endif // If reactions are to be calculated, we check if all the dofs have reactions defined // This is tobe done only in debug mode #ifdef KRATOS_DEBUG if(BaseType::GetCalculateReactionsFlag()) { for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) { KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl << "Node : "<<dof_iterator->Id()<< std::endl << "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl; } } #endif KRATOS_CATCH(""); } /** * @brief Organises the dofset in order to speed up the building phase * @param rModelPart The model part of the problem to solve */ void SetUpSystem( ModelPart& rModelPart ) override { // Set equation id for degrees of freedom // the free degrees of freedom are positioned at the beginning of the system, // while the fixed one are at the end (in opposite order). 
// // that means that if the EquationId is greater than "mEquationSystemSize" // the pointed degree of freedom is restrained // int free_id = 0; int fix_id = BaseType::mDofSet.size(); for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) if (dof_iterator->IsFixed()) dof_iterator->SetEquationId(--fix_id); else dof_iterator->SetEquationId(free_id++); BaseType::mEquationSystemSize = fix_id; } //************************************************************************** //************************************************************************** void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme, TSystemMatrixPointerType& pA, TSystemVectorPointerType& pDx, TSystemVectorPointerType& pb, ModelPart& rModelPart ) override { KRATOS_TRY // boost::timer m_contruct_matrix; if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0)); pA.swap(pNewA); } if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0)); pDx.swap(pNewDx); } if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0)); pb.swap(pNewb); } if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0)); BaseType::mpReactionsVector.swap(pNewReactionsVector); } TSystemMatrixType& A = *pA; TSystemVectorType& Dx = *pDx; TSystemVectorType& b = *pb; //resizing the system vectors and matrix if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized { A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false); ConstructMatrixStructureForFSI(pScheme, A, rModelPart); } else { if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize) { KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW"); KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl; A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true); ConstructMatrixStructureForFSI(pScheme, A, rModelPart); } } if (Dx.size() != BaseType::mEquationSystemSize) Dx.resize(BaseType::mEquationSystemSize, false); if (b.size() != BaseType::mEquationSystemSize) b.resize(BaseType::mEquationSystemSize, false); //if needed resize the vector for the calculation of reactions if (BaseType::mCalculateReactionsFlag == true) { unsigned int ReactionsVectorSize = BaseType::mDofSet.size(); if (BaseType::mpReactionsVector->size() != ReactionsVectorSize) BaseType::mpReactionsVector->resize(ReactionsVectorSize, false); } // std::cout << "MOMENTUM EQ: contruct_matrix : " << m_contruct_matrix.elapsed() << std::endl; KRATOS_CATCH("") } //************************************************************************** //************************************************************************** /** * @brief Applies the dirichlet conditions. This operation may be very heavy or completely * unexpensive depending on the implementation choosen and on how the System Matrix is built. 
* @details For explanation of how it works for a particular implementation the user * should refer to the particular Builder And Solver choosen * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void ApplyDirichletConditions( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { } /** * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed */ void Clear() override { this->mDofSet = DofsArrayType(); if (this->mpReactionsVector != NULL) TSparseSpace::Clear((this->mpReactionsVector)); // this->mReactionsVector = TSystemVectorType(); this->mpLinearSystemSolver->Clear(); KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl; } /** * @brief This function is designed to be called once to perform all the checks needed * on the input provided. Checks can be "expensive" as the function is designed * to catch user's errors. * @param rModelPart The model part of the problem to solve * @return 0 all ok */ int Check(ModelPart& rModelPart) override { KRATOS_TRY return 0; KRATOS_CATCH(""); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ void Assemble( TSystemMatrixType& A, TSystemVectorType& b, const LocalSystemMatrixType& LHS_Contribution, const LocalSystemVectorType& RHS_Contribution, const Element::EquationIdVectorType& EquationId #ifdef _OPENMP ,std::vector< omp_lock_t >& lock_array #endif ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { #ifdef _OPENMP omp_set_lock(&lock_array[i_global]); #endif b[i_global] += RHS_Contribution(i_local); for (unsigned int j_local = 0; j_local < local_size; j_local++) { unsigned int j_global = EquationId[j_local]; if (j_global < BaseType::mEquationSystemSize) { A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } #ifdef _OPENMP omp_unset_lock(&lock_array[i_global]); #endif } //note that assembly on fixed rows is not performed here } } //************************************************************************** virtual void ConstructMatrixStructureForFSI( typename TSchemeType::Pointer pScheme, TSystemMatrixType& A, ModelPart& rModelPart) { //filling with zero the matrix (creating the structure) Timer::Start("MatrixStructure"); ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); // Getting the array of the conditions const int nconditions = static_cast<int>(rModelPart.Conditions().size()); ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin(); const std::size_t equation_size = BaseType::mEquationSystemSize; #ifdef USE_GOOGLE_HASH std::vector<google::dense_hash_set<std::size_t> > indices(equation_size); const std::size_t empty_key = 2 * equation_size + 10; #else std::vector<std::unordered_set<std::size_t> > indices(equation_size); #endif #pragma omp parallel for firstprivate(equation_size) for (int iii = 0; iii < static_cast<int>(equation_size); iii++) { 
#ifdef USE_GOOGLE_HASH indices[iii].set_empty_key(empty_key); #else indices[iii].reserve(40); #endif } Element::EquationIdVectorType EquationId; ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { if(itNode->Is(SOLID)){ const unsigned int localSize = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS).size(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER); const unsigned int neighSize = nodalSFDneighboursId.size(); if (EquationId.size() != localSize) EquationId.resize(localSize, false); unsigned int firstCol=0; const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); EquationId[0]=itNode->GetDof(VELOCITY_X,xDofPos).EquationId(); EquationId[1]=itNode->GetDof(VELOCITY_Y,xDofPos+1).EquationId(); if(dimension==3) EquationId[2]=itNode->GetDof(VELOCITY_Z,xDofPos+2).EquationId(); if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); for (unsigned int i = 0; i< neighb_nodes.size(); i++) { unsigned int indexNode=i+1; if(indexNode<neighSize){ unsigned int other_neigh_nodes_id=nodalSFDneighboursId[indexNode]; firstCol+=dimension; for (unsigned int k = 0; k< neighb_nodes.size(); k++) { unsigned int neigh_nodes_id=neighb_nodes[k].Id(); if(neigh_nodes_id==other_neigh_nodes_id){ EquationId[firstCol] =neighb_nodes[k].GetDof(VELOCITY_X,xDofPos).EquationId(); EquationId[firstCol+1] =neighb_nodes[k].GetDof(VELOCITY_Y,xDofPos+1).EquationId(); if(dimension==3){ EquationId[firstCol+2]=neighb_nodes[k].GetDof(VELOCITY_Z,xDofPos+2).EquationId(); } break; } } } } }else{ NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); for (unsigned int i = 0; i< neighb_nodes.size(); i++) { firstCol+=dimension; EquationId[firstCol] =neighb_nodes[i].GetDof(VELOCITY_X,xDofPos).EquationId(); EquationId[firstCol+1] =neighb_nodes[i].GetDof(VELOCITY_Y,xDofPos+1).EquationId(); if(dimension==3){ EquationId[firstCol+2]=neighb_nodes[i].GetDof(VELOCITY_Z,xDofPos+2).EquationId(); } } } } if((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true) { const unsigned int localSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS).size(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); const unsigned int neighSize = nodalSFDneighboursId.size(); if (EquationId.size() != localSize) EquationId.resize(localSize, false); unsigned int firstCol=0; const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); EquationId[0]=itNode->GetDof(VELOCITY_X,xDofPos).EquationId(); EquationId[1]=itNode->GetDof(VELOCITY_Y,xDofPos+1).EquationId(); if(dimension==3) EquationId[2]=itNode->GetDof(VELOCITY_Z,xDofPos+2).EquationId(); if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); for (unsigned int i = 0; i< neighb_nodes.size(); i++) { unsigned int indexNode=i+1; if(indexNode<neighSize){ unsigned int other_neigh_nodes_id=nodalSFDneighboursId[indexNode]; firstCol+=dimension; for (unsigned int k = 0; k< neighb_nodes.size(); k++) { unsigned int 
neigh_nodes_id=neighb_nodes[k].Id(); if(neigh_nodes_id==other_neigh_nodes_id){ EquationId[firstCol] =neighb_nodes[k].GetDof(VELOCITY_X,xDofPos).EquationId(); EquationId[firstCol+1] =neighb_nodes[k].GetDof(VELOCITY_Y,xDofPos+1).EquationId(); if(dimension==3){ EquationId[firstCol+2]=neighb_nodes[k].GetDof(VELOCITY_Z,xDofPos+2).EquationId(); } break; } } } } }else{ NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); for (unsigned int i = 0; i< neighb_nodes.size(); i++) { firstCol+=dimension; EquationId[firstCol] =neighb_nodes[i].GetDof(VELOCITY_X,xDofPos).EquationId(); EquationId[firstCol+1] =neighb_nodes[i].GetDof(VELOCITY_Y,xDofPos+1).EquationId(); if(dimension==3){ EquationId[firstCol+2]=neighb_nodes[i].GetDof(VELOCITY_Z,xDofPos+2).EquationId(); } } } } for (std::size_t i = 0; i < EquationId.size(); i++) { if (EquationId[i] < BaseType::mEquationSystemSize) { #ifdef _OPENMP omp_set_lock(&mlock_array[EquationId[i]]); #endif auto& row_indices = indices[EquationId[i]]; for (auto it = EquationId.begin(); it != EquationId.end(); it++) { if (*it < BaseType::mEquationSystemSize) row_indices.insert(*it); } #ifdef _OPENMP omp_unset_lock(&mlock_array[EquationId[i]]); #endif } } } Element::EquationIdVectorType ids(3, 0); #pragma omp parallel for firstprivate(nconditions, ids) for (int iii = 0; iii<nconditions; iii++) { typename ConditionsArrayType::iterator i_condition = cond_begin + iii; pScheme->Condition_EquationId( *(i_condition.base()) , ids, CurrentProcessInfo); for (std::size_t i = 0; i < ids.size(); i++) { if (ids[i] < BaseType::mEquationSystemSize) { #ifdef _OPENMP omp_set_lock(&mlock_array[ids[i]]); #endif auto& row_indices = indices[ids[i]]; for (auto it = ids.begin(); it != ids.end(); it++) { if (*it < BaseType::mEquationSystemSize) row_indices.insert(*it); } #ifdef _OPENMP omp_unset_lock(&mlock_array[ids[i]]); #endif } } } //count the row sizes unsigned int nnz = 0; for (unsigned int i = 0; i < indices.size(); i++) nnz += indices[i].size(); A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz); double* Avalues = A.value_data().begin(); std::size_t* Arow_indices = A.index1_data().begin(); std::size_t* Acol_indices = A.index2_data().begin(); //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP! 
Arow_indices[0] = 0; for (int i = 0; i < static_cast<int>(A.size1()); i++) Arow_indices[i + 1] = Arow_indices[i] + indices[i].size(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(A.size1()); i++) { const unsigned int row_begin = Arow_indices[i]; const unsigned int row_end = Arow_indices[i + 1]; unsigned int k = row_begin; for (auto it = indices[i].begin(); it != indices[i].end(); it++) { Acol_indices[k] = *it; Avalues[k] = 0.0; k++; } std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]); } A.set_filled(indices.size() + 1, nnz); Timer::Stop("MatrixStructure"); } void AssembleLHS( TSystemMatrixType& A, LocalSystemMatrixType& LHS_Contribution, Element::EquationIdVectorType& EquationId ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { for (unsigned int j_local = 0; j_local < local_size; j_local++) { unsigned int j_global = EquationId[j_local]; if (j_global < BaseType::mEquationSystemSize) A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ #ifdef _OPENMP std::vector< omp_lock_t > mlock_array; #endif ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate) { std::vector<std::size_t>::iterator i = v.begin(); std::vector<std::size_t>::iterator endit = v.end(); while (i != endit && (*i) != candidate) { i++; } if (i == endit) { v.push_back(candidate); } } void AssembleRHS( TSystemVectorType& b, const LocalSystemVectorType& RHS_Contribution, const Element::EquationIdVectorType& EquationId ) { unsigned int local_size = RHS_Contribution.size(); if (BaseType::mCalculateReactionsFlag == false) { for (unsigned int i_local = 0; i_local < local_size; i_local++) { const unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) //free dof { // ASSEMBLING THE SYSTEM VECTOR double& b_value = b[i_global]; const double& rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } } } else { TSystemVectorType& ReactionsVector = *BaseType::mpReactionsVector; for (unsigned int i_local = 0; i_local < local_size; i_local++) { const unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) //free dof { // ASSEMBLING THE SYSTEM VECTOR double& b_value = b[i_global]; const double& rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } else //fixed dof { double& b_value = ReactionsVector[i_global - BaseType::mEquationSystemSize]; const double& rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } } } } //************************************************************************** void AssembleLHS_CompleteOnFreeRows( TSystemMatrixType& A, LocalSystemMatrixType& LHS_Contribution, Element::EquationIdVectorType& EquationId ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { for (unsigned int j_local = 0; j_local < local_size; j_local++) { int j_global = EquationId[j_local]; A(i_global, j_global) += 
LHS_Contribution(i_local, j_local); } } } } ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class NodalResidualBasedEliminationBuilderAndSolverForFSI */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI defined */
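// ---------------------------------------------------------------------------
// A minimal sketch (not from the Kratos sources) of the DOF-numbering
// convention used by SetUpSystem() above, using a simplified stand-in type
// MiniDof instead of Kratos' Dof class.  Free DOFs are numbered 0,1,2,...
// from the front and fixed DOFs are numbered downwards from the end, so any
// equation id >= the returned system size can be skipped during assembly,
// which is exactly the test performed in Assemble().
// ---------------------------------------------------------------------------
#include <cstddef>
#include <vector>

struct MiniDof {                    // hypothetical stand-in for a Kratos Dof
  bool fixed = false;
  std::size_t equation_id = 0;
};

// Returns the number of free equations (the analogue of mEquationSystemSize).
std::size_t SetUpMiniSystem(std::vector<MiniDof> &dofs) {
  std::size_t free_id = 0;
  std::size_t fix_id = dofs.size();
  for (auto &dof : dofs) {
    if (dof.fixed)
      dof.equation_id = --fix_id;   // fixed DOFs: n-1, n-2, ... (reverse order)
    else
      dof.equation_id = free_id++;  // free DOFs: 0, 1, 2, ...
  }
  return fix_id;                    // equals the number of free DOFs
}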
GB_unop__frexpx_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__frexpx_fp32_fp32)
// op(A') function:  GB (_unop_tran__frexpx_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = GB_frexpxf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_frexpxf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    float aij = Ax [pA] ;           \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ;                 \
    Cx [pC] = GB_frexpxf (z) ;      \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FREXPX || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__frexpx_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = GB_frexpxf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = GB_frexpxf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__frexpx_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
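//------------------------------------------------------------------------------
// A minimal standalone sketch (not generated code) of what this kernel boils
// down to, assuming GB_frexpxf is essentially the normalized-fraction result
// of frexpf: the operator is applied to every entry of a dense value array,
// or only to entries whose bitmap flag is set.  The names my_frexpx and
// apply_frexpx_dense are illustrative only.
//------------------------------------------------------------------------------

#include <math.h>
#include <stdint.h>

static inline float my_frexpx (float x)    // assumed equivalent of GB_frexpxf
{
    int e ;
    return (frexpf (x, &e)) ;              // fraction in [0.5,1) or 0, sign kept
}

void apply_frexpx_dense (float *Cx, const float *Ax, const int8_t *Ab,
                         int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   // bitmap: skip absent entries
        Cx [p] = my_frexpx (Ax [p]) ;
    }
}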
syrk.c
/**
 * syrk.c: This file was adapted from PolyBench/GPU 1.0 test suite
 * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
 *
 * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
 *
 * Contacts: Marcio M Pereira <[email protected]>
 *           Rafael Cardoso F Sousa <[email protected]>
 *           Luís Felipe Mattos <[email protected]>
 */

#include <assert.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>

#include "../../common/polybenchUtilFuncts.h"

// define the error threshold for the results "not matching"
#define ERROR_THRESHOLD 0.05

#define GPU 1

/* Problem size */
#define N 1024
#define M 1024

/* Declared constant values for alpha and beta */
/* (same as values in PolyBench 2.0) */
#define alpha 12435
#define beta 4546

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;

void init_arrays(DATA_TYPE *A, DATA_TYPE *C, DATA_TYPE *D) {
  int i, j;

  for (i = 0; i < N; i++) {
    for (j = 0; j < M; j++) {
      A[i * M + j] = ((DATA_TYPE)i * j) / N;
    }
    for (j = 0; j < M; j++) {
      C[i * M + j] = ((DATA_TYPE)i * j + 2) / N;
      D[i * M + j] = ((DATA_TYPE)i * j + 2) / N;
    }
  }
}

void compareResults(DATA_TYPE *C, DATA_TYPE *D) {
  int i, j, fail;
  fail = 0;

  // Compare C with D
  for (i = 0; i < N; i++) {
    for (j = 0; j < M; j++) {
      if (percentDiff(C[i * M + j], D[i * M + j]) > ERROR_THRESHOLD) {
        fail++;
      }
    }
  }

  // print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         ERROR_THRESHOLD, fail);
}

void syrk(DATA_TYPE *A, DATA_TYPE *C) {
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < M; j++) {
      C[i * M + j] *= beta;
    }
  }

  for (int i = 0; i < N; i++) {
    for (int j = 0; j < M; j++) {
      for (int k = 0; k < M; k++) {
        // index with row stride M, matching the GPU kernel (N == M here)
        C[i * M + j] += alpha * A[i * M + k] * A[j * M + k];
      }
    }
  }
}

void syrkGPU(DATA_TYPE *A, DATA_TYPE *D) {
#pragma omp target device(GPU) map(to : A[:N * M]) map(tofrom : D[:N * M])
  {
#pragma omp parallel for collapse(2)
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < M; j++) {
        D[i * M + j] *= beta;
      }
    }

#pragma omp parallel for collapse(2)
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < M; j++) {
        for (int k = 0; k < M; k++) {
          D[i * M + j] += alpha * A[i * M + k] * A[j * M + k];
        }
      }
    }
  }
}

int main() {
  double t_start, t_end, t_start_GPU, t_end_GPU;

  DATA_TYPE *A;
  DATA_TYPE *C;
  DATA_TYPE *D;

  A = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE));
  C = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE));
  D = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE));

  fprintf(stdout, "<< Symmetric rank-k operations >>\n");

  init_arrays(A, C, D);

  t_start_GPU = rtclock();
  syrkGPU(A, D);
  t_end_GPU = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end_GPU - t_start_GPU);

  t_start = rtclock();
  syrk(A, C);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);

  compareResults(C, D);

  free(A);
  free(C);
  free(D);

  return 0;
}
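/*
 * A possible alternative mapping of the offloaded kernel, sketched here for
 * comparison only (it is not part of the original benchmark and reuses the
 * N, M, alpha, beta and DATA_TYPE definitions above).  A bare
 * "#pragma omp target" region usually launches a single team, so the
 * "parallel for" loops in syrkGPU may occupy only one compute unit of the
 * device; combining "teams distribute" with "parallel for" is the common way
 * to spread the iteration space across the whole GPU.  Actual performance
 * depends on the compiler and runtime, so treat this as a sketch rather than
 * a drop-in replacement.
 */
void syrkGPU_teams(DATA_TYPE *A, DATA_TYPE *D) {
#pragma omp target data map(to : A[:N * M]) map(tofrom : D[:N * M])
  {
#pragma omp target teams distribute parallel for collapse(2)
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < M; j++) {
        D[i * M + j] *= beta;
      }
    }

#pragma omp target teams distribute parallel for collapse(2)
    for (int i = 0; i < N; i++) {
      for (int j = 0; j < M; j++) {
        DATA_TYPE sum = D[i * M + j];
        for (int k = 0; k < M; k++) {
          sum += alpha * A[i * M + k] * A[j * M + k];
        }
        D[i * M + j] = sum;
      }
    }
  }
}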
distribute_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}} #pragma omp distribute simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}} #pragma omp distribute simd foo // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}} #pragma omp distribute simd safelen(4) void test_no_clause() { int i; #pragma omp target #pragma omp teams #pragma omp distribute simd for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{statement after '#pragma omp distribute simd' must be a for loop}} #pragma omp distribute simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp target #pragma omp teams #pragma omp distribute simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd; for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_safelen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute simd safelen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd safelen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute simd safelen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4 for (i = 0; i < 16; ++i) ; #pragma omp target 
#pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // xxpected-error@+1 {{expected expression}} #pragma omp distribute simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd safelen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd safelen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute simd safelen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute simd safelen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute simd simdlen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd simdlen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute simd simdlen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4 for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp 
teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd simdlen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd simdlen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute simd simdlen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute simd simdlen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp distribute simd simdlen(6) safelen(5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp distribute simd safelen(5) simdlen(6) for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute simd collapse for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd collapse(, ) for (i = 0; i 
< 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // xxpected-error@+1 {{expected expression}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams #pragma omp distribute simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(0) for (i = 0; i < 16; ++i) 
; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; // expected-note@+3 {{defined as reduction}} #pragma omp target #pragma omp teams #pragma omp distribute simd collapse(2) reduction(+ : i) for (i = 0; i < 16; ++i) // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}} for (int j = 0; j < 16; ++j) // expected-error@+2 2 {{reduction variable must be shared}} // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp for reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; #pragma omp target #pragma omp teams for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) #pragma omp distribute simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd linear( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd linear(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd linear(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute simd linear(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute simd linear(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute simd linear(x, y, z) for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // 
expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd aligned(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(x :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp distribute simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd private( for (i = 0; i < 16; ++i) ; #pragma 
omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd private(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd private() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_firstprivate() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd firstprivate( for (i = 0; i < 16; ++i) ; } void test_lastprivate() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_reduction() { int i, x, y; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-warning@+1 
{{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected identifier}} #pragma omp distribute simd reduction( : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected expression}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction(+ for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+: for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ :, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ : x, + : y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected identifier}} #pragma omp distribute simd reduction(% : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(+ : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(* : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(- : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(& : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(| : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(^ : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(&& : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(|| : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(max : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(min : x) for (i = 0; i < 16; ++i) ; struct X { int x; }; struct X X; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd reduction(+ : X.x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd reduction(+ : x + x) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer 
type}} #pragma omp distribute simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void linear_modifiers(int argc) { int k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(k) for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(val(k)) for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(uval(k)) // expected-error {{expected 'val' modifier}} for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(ref(k)) // expected-error {{expected 'val' modifier}} for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(foo(k)) // expected-error {{expected 'val' modifier}} for (k = 0; k < argc; ++k) ++k; }
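The test above only exercises parser and semantic diagnostics for '#pragma omp distribute simd'; it never shows the checked clauses combined in a well-formed way. A minimal sketch of such a use follows, assuming a host compiler with -fopenmp; the function and variable names are illustrative and are not part of the test suite.

/* Illustrative sketch (not part of the test): clause forms the diagnostics above accept. */
void distribute_simd_ok(void) {
  int i, last = 0, sum = 0;
  int *p = 0;
#pragma omp target
#pragma omp teams
  /* simdlen(4) does not exceed safelen(8); aligned() takes a pointer (or
     array) plus an optional alignment; lastprivate and reduction use the
     same forms the test accepts. */
#pragma omp distribute simd safelen(8) simdlen(4) aligned(p : 16) lastprivate(last) reduction(+ : sum)
  for (i = 0; i < 16; ++i) {
    last = i;
    sum += i;
  }
}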
parallel_for.h
/* Copyright (c) 2013, Taiga Nomi and the respective contributors All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. */ #pragma once #include <cassert> #include <cstdio> #include <limits> #include <string> #include <type_traits> #include <vector> #include "aligned_allocator.h" #include "nn_error.h" #include "tiny_dnn/config.h" #ifdef CNN_USE_TBB #ifndef NOMINMAX #define NOMINMAX // tbb includes windows.h in tbb/machine/windows_api.h #endif #include <tbb/task_group.h> #include <tbb/tbb.h> #endif #if !defined(CNN_USE_OMP) && !defined(CNN_SINGLE_THREAD) #include <future> #include <thread> #endif #if defined(CNN_USE_GCD) && !defined(CNN_SINGLE_THREAD) #include <dispatch/dispatch.h> #endif namespace tiny_dnn { #ifdef CNN_USE_TBB static tbb::task_scheduler_init tbbScheduler( tbb::task_scheduler_init::automatic); // tbb::task_scheduler_init::deferred); typedef tbb::blocked_range<size_t> blocked_range; template <typename Func> void parallel_for(size_t begin, size_t end, const Func &f, size_t grainsize) { assert(end >= begin); tbb::parallel_for( blocked_range(begin, end, end - begin > grainsize ? grainsize : 1), f); } template <typename Func> void xparallel_for(size_t begin, size_t end, const Func &f) { f(blocked_range(begin, end, 100)); } #else struct blocked_range { typedef size_t const_iterator; blocked_range(size_t begin, size_t end) : begin_(begin), end_(end) {} blocked_range(int begin, int end) : begin_(static_cast<size_t>(begin)), end_(static_cast<size_t>(end)) {} const_iterator begin() const { return begin_; } const_iterator end() const { return end_; } private: size_t begin_; size_t end_; }; template <typename Func> void xparallel_for(size_t begin, size_t end, const Func &f) { blocked_range r(begin, end); f(r); } #if defined(CNN_USE_OMP) template <typename Func> void parallel_for(size_t begin, size_t end, const Func &f, size_t /*grainsize*/) { assert(end >= begin); #pragma omp parallel for for (size_t i = begin; i < end; ++i) f(blocked_range(i, i + 1)); } #elif defined(CNN_USE_GCD) template <typename Func> void parallel_for(size_t begin, size_t end, const Func &f, size_t grainsize) { assert(end >= begin); size_t count = end - begin; size_t blockSize = grainsize; if (count < blockSize || blockSize == 0) { blockSize = 1; } size_t blockCount = (count + blockSize - 1) / blockSize; assert(blockCount > 0); dispatch_apply(blockCount, dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0), ^(size_t block) { size_t blockStart = block * blockSize; size_t blockEnd = blockStart + blockSize; if (blockEnd > end) { blockEnd = end; } assert(blockStart < blockEnd); f(blocked_range(blockStart, blockEnd)); }); } #elif defined(CNN_SINGLE_THREAD) template <typename Func> void parallel_for(size_t begin, size_t end, const Func &f, size_t /*grainsize*/) { xparallel_for(begin, end, f); } #else template <typename Func> void parallel_for(size_t begin, size_t end, const Func &f, size_t /*grainsize*/) { assert(end >= begin); size_t nthreads = std::thread::hardware_concurrency(); size_t blockSize = (end - begin) / nthreads; if (blockSize * nthreads < end - begin) blockSize++; std::vector<std::future<void> > futures; size_t blockBegin = begin; size_t blockEnd = blockBegin + blockSize; if (blockEnd > end) blockEnd = end; for (size_t i = 0; i < nthreads; i++) { futures.push_back( std::move(std::async(std::launch::async, [blockBegin, blockEnd, &f] { f(blocked_range(blockBegin, blockEnd)); }))); blockBegin += blockSize; blockEnd = blockBegin + blockSize; if 
(blockBegin >= end) break; if (blockEnd > end) blockEnd = end; } for (auto &future : futures) future.wait(); } #endif #endif // CNN_USE_TBB template <typename T, typename U> bool value_representation(U const &value) { return static_cast<U>(static_cast<T>(value)) == value; } template <typename T, typename Func> inline void for_(std::true_type, bool parallelize, size_t begin, T end, Func f, int grainsize = 100) { parallelize = parallelize && value_representation<size_t>(end); parallelize ? parallel_for(begin, static_cast<size_t>(end), f, grainsize) : xparallel_for(begin, static_cast<size_t>(end), f); } template <typename T, typename Func> inline void for_(std::false_type, bool parallelize, size_t begin, T end, Func f, int grainsize = 100) { parallelize ? parallel_for(begin, static_cast<size_t>(end), f, grainsize) : xparallel_for(begin, end, f); } template <typename T, typename Func> inline void for_( bool parallelize, size_t begin, T end, Func f, size_t grainsize = 100) { static_assert(std::is_integral<T>::value, "end must be integral type"); for_(typename std::is_unsigned<T>::type(), parallelize, begin, end, f, grainsize); } template <typename T, typename Func> inline void for_i(bool parallelize, T size, Func f, size_t grainsize = 100) { #ifdef CNN_SINGLE_THREAD for (size_t i = 0; i < (size_t)size; ++i) { f(i); } #else // #ifdef CNN_SINGLE_THREAD for_(parallelize, 0, size, [&](const blocked_range &r) { #ifdef CNN_USE_OMP #pragma omp parallel for #endif for (size_t i = r.begin(); i < r.end(); i++) { f(i); } }, grainsize); #endif // #ifdef CNN_SINGLE_THREAD } template <typename T, typename Func> inline void for_i(T size, Func f, size_t grainsize = 100) { for_i(true, size, f, grainsize); } } // namespace tiny_dnn
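The std::thread fallback above sizes its blocks with a rounded-up division and clamps the final block to 'end'. A small standalone C sketch of that partitioning arithmetic, with illustrative values, is shown below.

#include <stddef.h>
#include <stdio.h>

/* Mirror of the block sizing in the std::thread branch of parallel_for:
   blockSize = ceil((end - begin) / nthreads), last block clamped to end. */
static void show_blocks(size_t begin, size_t end, size_t nthreads) {
  size_t blockSize = (end - begin) / nthreads;
  if (blockSize * nthreads < end - begin)
    blockSize++;                      /* round up so the whole range is covered */
  for (size_t b = begin; b < end; b += blockSize) {
    size_t e = b + blockSize;
    if (e > end)
      e = end;                        /* clamp the final block */
    printf("[%zu, %zu)\n", b, e);
  }
}

int main(void) {
  show_blocks(0, 10, 4);              /* prints [0, 3) [3, 6) [6, 9) [9, 10) */
  return 0;
}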
ast-dump-openmp-begin-declare-variant_1.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s // expected-no-diagnostics int also_before(void) { return 0; } #pragma omp begin declare variant match(device={kind(gpu)}) int also_after(void) { return 1; } int also_before(void) { return 2; } #pragma omp end declare variant #pragma omp begin declare variant match(device={kind(fpga)}) This text is never parsed! #pragma omp end declare variant int also_after(void) { return 0; } int test(void) { // Should return 0. return also_after() + also_before(); } // Make sure: // - we do not see the ast nodes for the gpu kind // - we do not choke on the text in the kind(fpga) guarded scopes // CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_4:0x[a-z0-9]*]] <line:25:1, line:27:1> line:25:5 used also_after 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_5:0x[a-z0-9]*]] <col:22, line:27:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_6:0x[a-z0-9]*]] <line:26:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_7:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: `-FunctionDecl [[ADDR_8:0x[a-z0-9]*]] <line:29:1, line:32:1> line:29:5 test 'int ({{.*}})' // CHECK-NEXT: `-CompoundStmt [[ADDR_9:0x[a-z0-9]*]] <col:16, line:32:1> // CHECK-NEXT: `-ReturnStmt [[ADDR_10:0x[a-z0-9]*]] <line:31:3, col:37> // CHECK-NEXT: `-BinaryOperator [[ADDR_11:0x[a-z0-9]*]] <col:10, col:37> 'int' '+' // CHECK-NEXT: |-CallExpr [[ADDR_12:0x[a-z0-9]*]] <col:10, col:21> 'int' // CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | `-DeclRefExpr [[ADDR_14:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_4]] 'also_after' 'int ({{.*}})' // CHECK-NEXT: `-CallExpr [[ADDR_15:0x[a-z0-9]*]] <col:25, col:37> 'int' // CHECK-NEXT: `-ImplicitCastExpr [[ADDR_16:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: `-DeclRefExpr [[ADDR_17:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
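The test above relies on the gpu and fpga selectors not matching an x86_64 host, so those variants are discarded. For contrast, a sketch of the same mechanism with a selector that should match a regular host compile is given below; it is not part of the test and the names are made up.

/* Illustrative sketch: a variant whose context selector matches the host. */
int base(void) { return 1; }

#pragma omp begin declare variant match(device = {kind(cpu)})
int base(void) { return 2; }   /* variant of base() for CPU devices */
#pragma omp end declare variant

int use_base(void) {
  /* With -fopenmp on a typical host, kind(cpu) should match, so this call
     resolves to the variant and returns 2; without OpenMP, or when the
     selector does not match, the base definition returning 1 is used. */
  return base();
}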
convolution-2d.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <[email protected]> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp parallel for private(j) collapse(2) schedule(#P1, #P2) num_threads(#P3) for (i = 1; i < _PB_NI - 1; ++i) { for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
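The schedule(#P1, #P2) and num_threads(#P3) tokens in kernel_conv2d are autotuning placeholders, so the file does not compile as-is. A sketch with one possible instantiation follows; schedule(static, 16) and num_threads(8) are illustrative values, not values shipped with the benchmark.

/* Same 3x3 stencil as kernel_conv2d, with the #P placeholders filled in. */
static void conv2d_example(int ni, int nj, double A[ni][nj], double B[ni][nj]) {
  int i, j;
#pragma omp parallel for private(j) collapse(2) schedule(static, 16) num_threads(8)
  for (i = 1; i < ni - 1; ++i)
    for (j = 1; j < nj - 1; ++j)
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] - 0.8 * A[i-1][j+1]
              - 0.3 * A[i][j-1]   + 0.6 * A[i][j]   - 0.9 * A[i][j+1]
              + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}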
yescrypt-simd.c
/*- * Copyright 2009 Colin Percival * Copyright 2012-2015 Alexander Peslyak * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * This file was originally written by Colin Percival as part of the Tarsnap * online backup system. */ /* * On 64-bit, enabling SSE4.1 helps our pwxform code indirectly, via avoiding * gcc bug 54349 (fixed for gcc 4.9+). On 32-bit, it's of direct help. AVX * and XOP are of further help either way. */ #include <emmintrin.h> #ifdef __XOP__ #include <x86intrin.h> #endif #include <errno.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include "sha256.h" #include "sysendian.h" #include "yescrypt.h" #include "yescrypt-platform.c" #if __STDC_VERSION__ >= 199901L /* have restrict */ #elif defined(__GNUC__) #define restrict __restrict #else #define restrict #endif #ifdef __GNUC__ #define unlikely(exp) __builtin_expect(exp, 0) #else #define unlikely(exp) (exp) #endif #define PREFETCH(x, hint) _mm_prefetch((const char *)(x), (hint)); #ifdef __XOP__ #define ARX(out, in1, in2, s) \ out = _mm_xor_si128(out, _mm_roti_epi32(_mm_add_epi32(in1, in2), s)); #else #define ARX(out, in1, in2, s) \ { \ __m128i T = _mm_add_epi32(in1, in2); \ out = _mm_xor_si128(out, _mm_slli_epi32(T, s)); \ out = _mm_xor_si128(out, _mm_srli_epi32(T, 32-s)); \ } #endif #define SALSA20_2ROUNDS \ /* Operate on "columns" */ \ ARX(X1, X0, X3, 7) \ ARX(X2, X1, X0, 9) \ ARX(X3, X2, X1, 13) \ ARX(X0, X3, X2, 18) \ \ /* Rearrange data */ \ X1 = _mm_shuffle_epi32(X1, 0x93); \ X2 = _mm_shuffle_epi32(X2, 0x4E); \ X3 = _mm_shuffle_epi32(X3, 0x39); \ \ /* Operate on "rows" */ \ ARX(X3, X0, X1, 7) \ ARX(X2, X3, X0, 9) \ ARX(X1, X2, X3, 13) \ ARX(X0, X1, X2, 18) \ \ /* Rearrange data */ \ X1 = _mm_shuffle_epi32(X1, 0x39); \ X2 = _mm_shuffle_epi32(X2, 0x4E); \ X3 = _mm_shuffle_epi32(X3, 0x93); /** * Apply the Salsa20/2 core to the block provided in (X0 ... X3). */ #define SALSA20_2(out) \ { \ __m128i Y0 = X0; \ __m128i Y1 = X1; \ __m128i Y2 = X2; \ __m128i Y3 = X3; \ SALSA20_2ROUNDS \ (out)[0] = X0 = _mm_add_epi32(X0, Y0); \ (out)[1] = X1 = _mm_add_epi32(X1, Y1); \ (out)[2] = X2 = _mm_add_epi32(X2, Y2); \ (out)[3] = X3 = _mm_add_epi32(X3, Y3); \ } /** * Apply the Salsa20/8 core to the block provided in (X0 ... X3) ^ (Z0 ... Z3). 
*/ #define SALSA20_8_XOR_ANY(maybe_decl, Z0, Z1, Z2, Z3, out) \ X0 = _mm_xor_si128(X0, Z0); \ X1 = _mm_xor_si128(X1, Z1); \ X2 = _mm_xor_si128(X2, Z2); \ X3 = _mm_xor_si128(X3, Z3); \ { \ maybe_decl Y0 = X0; \ maybe_decl Y1 = X1; \ maybe_decl Y2 = X2; \ maybe_decl Y3 = X3; \ SALSA20_2ROUNDS \ SALSA20_2ROUNDS \ SALSA20_2ROUNDS \ SALSA20_2ROUNDS \ (out)[0] = X0 = _mm_add_epi32(X0, Y0); \ (out)[1] = X1 = _mm_add_epi32(X1, Y1); \ (out)[2] = X2 = _mm_add_epi32(X2, Y2); \ (out)[3] = X3 = _mm_add_epi32(X3, Y3); \ } #define SALSA20_8_XOR_MEM(in, out) \ SALSA20_8_XOR_ANY(__m128i, (in)[0], (in)[1], (in)[2], (in)[3], out) #define SALSA20_8_XOR_REG(out) \ SALSA20_8_XOR_ANY(/* empty */, Y0, Y1, Y2, Y3, out) typedef union { uint32_t w[16]; __m128i q[4]; } salsa20_blk_t; /** * blockmix_salsa8(Bin, Bout, r): * Compute Bout = BlockMix_{salsa20/8, r}(Bin). The input Bin must be 128r * bytes in length; the output Bout must also be the same size. */ static void blockmix_salsa8(const salsa20_blk_t *restrict Bin, salsa20_blk_t *restrict Bout, size_t r) { size_t i; __m128i X0, X1, X2, X3; r--; PREFETCH(&Bin[r * 2 + 1], _MM_HINT_T0) for (i = 0; i < r; i++) { PREFETCH(&Bin[i * 2], _MM_HINT_T0) PREFETCH(&Bin[i * 2 + 1], _MM_HINT_T0) } PREFETCH(&Bin[r * 2], _MM_HINT_T0) /* 1: X <-- B_{2r - 1} */ X0 = Bin[r * 2 + 1].q[0]; X1 = Bin[r * 2 + 1].q[1]; X2 = Bin[r * 2 + 1].q[2]; X3 = Bin[r * 2 + 1].q[3]; /* 2: for i = 0 to 2r - 1 do */ for (i = 0; i <= r; i++) { /* 3: X <-- H(X \xor B_i) */ /* 4: Y_i <-- X */ /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */ SALSA20_8_XOR_MEM(Bin[i * 2].q, Bout[i].q) /* 3: X <-- H(X \xor B_i) */ /* 4: Y_i <-- X */ /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */ SALSA20_8_XOR_MEM(Bin[i * 2 + 1].q, Bout[r + 1 + i].q) } } /* * (V)PSRLDQ and (V)PSHUFD have higher throughput than (V)PSRLQ on some CPUs * starting with Sandy Bridge. Additionally, PSHUFD uses separate source and * destination registers, whereas the shifts would require an extra move * instruction for our code when building without AVX. Unfortunately, PSHUFD * is much slower on Conroe (4 cycles latency vs. 1 cycle latency for PSRLQ) * and somewhat slower on some non-Intel CPUs (luckily not including AMD * Bulldozer and Piledriver). 
*/ #ifdef __AVX__ #define HI32(X) \ _mm_srli_si128((X), 4) #elif 1 /* As an option, check for __SSE4_1__ here not to hurt Conroe */ #define HI32(X) \ _mm_shuffle_epi32((X), _MM_SHUFFLE(2,3,0,1)) #else #define HI32(X) \ _mm_srli_epi64((X), 32) #endif #if defined(__x86_64__) && (defined(__ICC) || defined(__llvm__)) /* Intel's name, also supported by recent gcc */ #define EXTRACT64(X) _mm_cvtsi128_si64(X) #elif defined(__x86_64__) && !defined(_MSC_VER) && !defined(__OPEN64__) /* gcc got the 'x' name earlier than non-'x', MSVC and Open64 had bugs */ #define EXTRACT64(X) _mm_cvtsi128_si64x(X) #elif defined(__x86_64__) && defined(__SSE4_1__) /* No known bugs for this intrinsic */ #include <smmintrin.h> #define EXTRACT64(X) _mm_extract_epi64((X), 0) #elif defined(__SSE4_1__) /* 32-bit */ #include <smmintrin.h> #if 0 /* This is currently unused by the code below, which instead uses these two * intrinsics explicitly when (!defined(__x86_64__) && defined(__SSE4_1__)) */ #define EXTRACT64(X) \ ((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \ ((uint64_t)(uint32_t)_mm_extract_epi32((X), 1) << 32)) #endif #else /* 32-bit or compilers with known past bugs in _mm_cvtsi128_si64*() */ #define EXTRACT64(X) \ ((uint64_t)(uint32_t)_mm_cvtsi128_si32(X) | \ ((uint64_t)(uint32_t)_mm_cvtsi128_si32(HI32(X)) << 32)) #endif /* This is tunable */ #define Swidth 8 /* Not tunable in this implementation, hard-coded in a few places */ #define PWXsimple 2 #define PWXgather 4 /* Derived values. Not tunable except via Swidth above. */ #define PWXbytes (PWXgather * PWXsimple * 8) #define Sbytes (3 * (1 << Swidth) * PWXsimple * 8) #define Smask (((1 << Swidth) - 1) * PWXsimple * 8) #define Smask2 (((uint64_t)Smask << 32) | Smask) #if !defined(__x86_64__) && defined(__SSE4_1__) /* 32-bit with SSE4.1 */ #define PWXFORM_X_T __m128i #define PWXFORM_SIMD(X, x, s0, s1) \ x = _mm_and_si128(X, _mm_set1_epi64x(Smask2)); \ s0 = *(__m128i *)(S0 + (uint32_t)_mm_cvtsi128_si32(x)); \ s1 = *(__m128i *)(S1 + (uint32_t)_mm_extract_epi32(x, 1)); \ X = _mm_mul_epu32(HI32(X), X); \ X = _mm_add_epi64(X, s0); \ X = _mm_xor_si128(X, s1); #else /* 64-bit, or 32-bit without SSE4.1 */ #define PWXFORM_X_T uint64_t #define PWXFORM_SIMD(X, x, s0, s1) \ x = EXTRACT64(X) & Smask2; \ s0 = *(__m128i *)(S0 + (uint32_t)x); \ s1 = *(__m128i *)(S1 + (x >> 32)); \ X = _mm_mul_epu32(HI32(X), X); \ X = _mm_add_epi64(X, s0); \ X = _mm_xor_si128(X, s1); #endif #define PWXFORM_WRITE \ *(__m128i *)(S2 + w) = X0; \ *(__m128i *)(S2 + w + 16) = X1; \ *(__m128i *)(S2 + w + 32) = X2; \ *(__m128i *)(S2 + w + 48) = X3; \ w += 64; #define PWXFORM_ROUND \ PWXFORM_SIMD(X0, x0, s00, s01) \ PWXFORM_SIMD(X1, x1, s10, s11) \ PWXFORM_SIMD(X2, x2, s20, s21) \ PWXFORM_SIMD(X3, x3, s30, s31) #define PWXFORM \ { \ PWXFORM_X_T x0, x1, x2, x3; \ __m128i s00, s01, s10, s11, s20, s21, s30, s31; \ PWXFORM_ROUND \ PWXFORM_ROUND PWXFORM_WRITE \ PWXFORM_ROUND PWXFORM_WRITE \ PWXFORM_ROUND PWXFORM_WRITE \ PWXFORM_ROUND PWXFORM_WRITE \ PWXFORM_ROUND \ w &= Smask; \ { \ uint8_t * Stmp = S2; \ S2 = S1; \ S1 = S0; \ S0 = Stmp; \ } \ } #define XOR4(in) \ X0 = _mm_xor_si128(X0, (in)[0]); \ X1 = _mm_xor_si128(X1, (in)[1]); \ X2 = _mm_xor_si128(X2, (in)[2]); \ X3 = _mm_xor_si128(X3, (in)[3]); #define OUT(out) \ (out)[0] = X0; \ (out)[1] = X1; \ (out)[2] = X2; \ (out)[3] = X3; typedef struct { uint8_t *S0, *S1, *S2; size_t w; } pwxform_ctx_t; #define Salloc (Sbytes + ((sizeof(pwxform_ctx_t) + 63) & ~63U)) /** * blockmix_pwxform(Bin, Bout, r, S): * Compute Bout = BlockMix_pwxform{salsa20/8, r, S}(Bin). 
The input Bin must * be 128r bytes in length; the output Bout must also be the same size. */ static void blockmix(const salsa20_blk_t *restrict Bin, salsa20_blk_t *restrict Bout, size_t r, pwxform_ctx_t *restrict ctx) { uint8_t *S0 = ctx->S0, *S1 = ctx->S1, *S2 = ctx->S2; size_t w = ctx->w; size_t i; __m128i X0, X1, X2, X3; /* Convert 128-byte blocks to 64-byte blocks */ /* 1: r_1 <-- 128r / PWXbytes */ r *= 2; r--; PREFETCH(&Bin[r], _MM_HINT_T0) for (i = 0; i < r; i++) { PREFETCH(&Bin[i], _MM_HINT_T0) } /* 2: X <-- B'_{r_1 - 1} */ X0 = Bin[r].q[0]; X1 = Bin[r].q[1]; X2 = Bin[r].q[2]; X3 = Bin[r].q[3]; /* 3: for i = 0 to r_1 - 1 do */ i = 0; do { /* 5: X <-- X \xor B'_i */ XOR4(Bin[i].q) /* 7: X <-- pwxform(X) */ PWXFORM if (unlikely(i >= r)) break; /* 8: B'_i <-- X */ OUT(Bout[i].q) i++; } while (1); ctx->S0 = S0; ctx->S1 = S1; ctx->S2 = S2; ctx->w = w; /* 11: B_i <-- H(B_i) */ SALSA20_2(Bout[i].q) } #define XOR4_2(in1, in2) \ X0 = _mm_xor_si128((in1)[0], (in2)[0]); \ X1 = _mm_xor_si128((in1)[1], (in2)[1]); \ X2 = _mm_xor_si128((in1)[2], (in2)[2]); \ X3 = _mm_xor_si128((in1)[3], (in2)[3]); static uint32_t blockmix_salsa8_xor(const salsa20_blk_t *restrict Bin1, const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout, size_t r) { size_t i; __m128i X0, X1, X2, X3; r--; PREFETCH(&Bin2[r * 2 + 1], _MM_HINT_T0) PREFETCH(&Bin1[r * 2 + 1], _MM_HINT_T0) for (i = 0; i < r; i++) { PREFETCH(&Bin2[i * 2], _MM_HINT_T0) PREFETCH(&Bin1[i * 2], _MM_HINT_T0) PREFETCH(&Bin2[i * 2 + 1], _MM_HINT_T0) PREFETCH(&Bin1[i * 2 + 1], _MM_HINT_T0) } PREFETCH(&Bin2[r * 2], _MM_HINT_T0) PREFETCH(&Bin1[r * 2], _MM_HINT_T0) /* 1: X <-- B_{2r - 1} */ XOR4_2(Bin1[r * 2 + 1].q, Bin2[r * 2 + 1].q) /* 2: for i = 0 to 2r - 1 do */ for (i = 0; i <= r; i++) { /* 3: X <-- H(X \xor B_i) */ /* 4: Y_i <-- X */ /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... Y_{2r-1}) */ XOR4(Bin1[i * 2].q) SALSA20_8_XOR_MEM(Bin2[i * 2].q, Bout[i].q) /* 3: X <-- H(X \xor B_i) */ /* 4: Y_i <-- X */ /* 6: B' <-- (Y_0, Y_2 ... Y_{2r-2}, Y_1, Y_3 ... 
Y_{2r-1}) */ XOR4(Bin1[i * 2 + 1].q) SALSA20_8_XOR_MEM(Bin2[i * 2 + 1].q, Bout[r + 1 + i].q) } return _mm_cvtsi128_si32(X0); } static uint32_t blockmix_xor(const salsa20_blk_t *restrict Bin1, const salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout, size_t r, int Bin2_in_ROM, pwxform_ctx_t *restrict ctx) { uint8_t *S0 = ctx->S0, *S1 = ctx->S1, *S2 = ctx->S2; size_t w = ctx->w; size_t i; __m128i X0, X1, X2, X3; /* Convert 128-byte blocks to 64-byte blocks */ /* 1: r_1 <-- 128r / PWXbytes */ r *= 2; r--; if (Bin2_in_ROM) { PREFETCH(&Bin2[r], _MM_HINT_NTA) PREFETCH(&Bin1[r], _MM_HINT_T0) for (i = 0; i < r; i++) { PREFETCH(&Bin2[i], _MM_HINT_NTA) PREFETCH(&Bin1[i], _MM_HINT_T0) } } else { PREFETCH(&Bin2[r], _MM_HINT_T0) PREFETCH(&Bin1[r], _MM_HINT_T0) for (i = 0; i < r; i++) { PREFETCH(&Bin2[i], _MM_HINT_T0) PREFETCH(&Bin1[i], _MM_HINT_T0) } } /* 2: X <-- B'_{r_1 - 1} */ XOR4_2(Bin1[r].q, Bin2[r].q) /* 3: for i = 0 to r_1 - 1 do */ i = 0; r--; do { /* 5: X <-- X \xor B'_i */ XOR4(Bin1[i].q) XOR4(Bin2[i].q) /* 7: X <-- pwxform(X) */ PWXFORM /* 8: B'_i <-- X */ OUT(Bout[i].q) /* 5: X <-- X \xor B'_i */ XOR4(Bin1[i + 1].q) XOR4(Bin2[i + 1].q) /* 7: X <-- pwxform(X) */ PWXFORM if (unlikely(i >= r)) break; /* 8: B'_i <-- X */ OUT(Bout[i + 1].q) i += 2; } while (1); i++; ctx->S0 = S0; ctx->S1 = S1; ctx->S2 = S2; ctx->w = w; /* 11: B_i <-- H(B_i) */ SALSA20_2(Bout[i].q) return _mm_cvtsi128_si32(X0); } #undef XOR4 #define XOR4(in, out) \ (out)[0] = Y0 = _mm_xor_si128((in)[0], (out)[0]); \ (out)[1] = Y1 = _mm_xor_si128((in)[1], (out)[1]); \ (out)[2] = Y2 = _mm_xor_si128((in)[2], (out)[2]); \ (out)[3] = Y3 = _mm_xor_si128((in)[3], (out)[3]); #define XOR4_Y \ X0 = _mm_xor_si128(X0, Y0); \ X1 = _mm_xor_si128(X1, Y1); \ X2 = _mm_xor_si128(X2, Y2); \ X3 = _mm_xor_si128(X3, Y3); static uint32_t blockmix_xor_save(const salsa20_blk_t *restrict Bin1, salsa20_blk_t *restrict Bin2, salsa20_blk_t *restrict Bout, size_t r, pwxform_ctx_t *restrict ctx) { __m128i X0, X1, X2, X3, Y0, Y1, Y2, Y3; uint8_t *S0 = ctx->S0, *S1 = ctx->S1, *S2 = ctx->S2; size_t w = ctx->w; size_t i; /* Convert 128-byte blocks to 64-byte blocks */ /* 1: r_1 <-- 128r / PWXbytes */ r *= 2; r--; PREFETCH(&Bin2[r], _MM_HINT_T0) PREFETCH(&Bin1[r], _MM_HINT_T0) for (i = 0; i < r; i++) { PREFETCH(&Bin2[i], _MM_HINT_T0) PREFETCH(&Bin1[i], _MM_HINT_T0) } /* 2: X <-- B'_{r_1 - 1} */ XOR4_2(Bin1[r].q, Bin2[r].q) /* 3: for i = 0 to r_1 - 1 do */ i = 0; r--; do { XOR4(Bin1[i].q, Bin2[i].q) /* 5: X <-- X \xor B'_i */ XOR4_Y /* 7: X <-- pwxform(X) */ PWXFORM /* 8: B'_i <-- X */ OUT(Bout[i].q) XOR4(Bin1[i + 1].q, Bin2[i + 1].q) /* 5: X <-- X \xor B'_i */ XOR4_Y /* 7: X <-- pwxform(X) */ PWXFORM if (unlikely(i >= r)) break; /* 8: B'_i <-- X */ OUT(Bout[i + 1].q) i += 2; } while (1); i++; ctx->S0 = S0; ctx->S1 = S1; ctx->S2 = S2; ctx->w = w; /* 11: B_i <-- H(B_i) */ SALSA20_2(Bout[i].q) return _mm_cvtsi128_si32(X0); } #undef ARX #undef SALSA20_2ROUNDS #undef SALSA20_2 #undef SALSA20_8_XOR_ANY #undef SALSA20_8_XOR_MEM #undef SALSA20_8_XOR_REG #undef PWXFORM_X_T #undef PWXFORM_SIMD #undef PWXFORM_ROUND #undef PWXFORM #undef OUT #undef XOR4 #undef XOR4_2 #undef XOR4_Y /** * integerify(B, r): * Return the result of parsing B_{2r-1} as a little-endian integer. */ static inline uint32_t integerify(const salsa20_blk_t * B, size_t r) { return B[2 * r - 1].w[0]; } /** * smix1(B, r, N, flags, V, NROM, VROM, XY, ctx): * Compute first loop of B = SMix_r(B, N). 
The input B must be 128r bytes in * length; the temporary storage V must be 128rN bytes in length; the temporary * storage XY must be 128r bytes in length. The value N must be even and no * smaller than 2. The array V must be aligned to a multiple of 64 bytes, and * arrays B and XY to a multiple of at least 16 bytes (aligning them to 64 * bytes as well saves cache lines, but might result in cache bank conflicts). */ static void smix1(uint8_t * B, size_t r, uint32_t N, yescrypt_flags_t flags, salsa20_blk_t * V, uint32_t NROM, const salsa20_blk_t * VROM, salsa20_blk_t * XY, pwxform_ctx_t * ctx) { size_t s = 2 * r; salsa20_blk_t * X = V, * Y; uint32_t i, j; size_t k; /* 1: X <-- B */ /* 3: V_i <-- X */ for (k = 0; k < 2 * r; k++) { for (i = 0; i < 16; i++) { X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]); } } if (VROM) { uint32_t n; salsa20_blk_t * V_n; const salsa20_blk_t * V_j; /* 4: X <-- H(X) */ /* 3: V_i <-- X */ Y = &V[s]; blockmix(X, Y, r, ctx); X = &V[2 * s]; /* j <-- Integerify(X) mod NROM */ j = integerify(Y, r) & (NROM - 1); V_j = &VROM[j * s]; /* X <-- H(X \xor VROM_j) */ j = blockmix_xor(Y, V_j, X, r, 1, ctx); for (n = 2; n < N; n <<= 1) { uint32_t m = (n < N / 2) ? n : (N - 1 - n); V_n = &V[n * s]; /* 2: for i = 0 to N - 1 do */ for (i = 1; i < m; i += 2) { /* j <-- Wrap(Integerify(X), i) */ j &= n - 1; j += i - 1; V_j = &V[j * s]; /* X <-- X \xor V_j */ /* 4: X <-- H(X) */ /* 3: V_i <-- X */ Y = &V_n[i * s]; /* j <-- Integerify(X) mod NROM */ j = blockmix_xor(X, V_j, Y, r, 0, ctx) & (NROM - 1); V_j = &VROM[j * s]; /* X <-- H(X \xor VROM_j) */ X = &V_n[(i + 1) * s]; j = blockmix_xor(Y, V_j, X, r, 1, ctx); } } n >>= 1; /* j <-- Wrap(Integerify(X), i) */ j &= n - 1; j += N - 2 - n; V_j = &V[j * s]; /* X <-- X \xor V_j */ /* 4: X <-- H(X) */ /* 3: V_i <-- X */ Y = &V[(N - 1) * s]; /* j <-- Integerify(X) mod NROM */ j = blockmix_xor(X, V_j, Y, r, 0, ctx) & (NROM - 1); V_j = &VROM[j * s]; /* X <-- X \xor V_j */ /* 4: X <-- H(X) */ X = XY; blockmix_xor(Y, V_j, X, r, 1, ctx); } else if (flags & YESCRYPT_RW) { uint32_t n; salsa20_blk_t * V_n, * V_j; /* 4: X <-- H(X) */ /* 3: V_i <-- X */ Y = &V[s]; blockmix(X, Y, r, ctx); /* 4: X <-- H(X) */ /* 3: V_i <-- X */ X = &V[2 * s]; blockmix(Y, X, r, ctx); j = integerify(X, r); for (n = 2; n < N; n <<= 1) { uint32_t m = (n < N / 2) ? 
n : (N - 1 - n); V_n = &V[n * s]; /* 2: for i = 0 to N - 1 do */ for (i = 1; i < m; i += 2) { Y = &V_n[i * s]; /* j <-- Wrap(Integerify(X), i) */ j &= n - 1; j += i - 1; V_j = &V[j * s]; /* X <-- X \xor V_j */ /* 4: X <-- H(X) */ /* 3: V_i <-- X */ j = blockmix_xor(X, V_j, Y, r, 0, ctx); /* j <-- Wrap(Integerify(X), i) */ j &= n - 1; j += i; V_j = &V[j * s]; /* X <-- X \xor V_j */ /* 4: X <-- H(X) */ /* 3: V_i <-- X */ X = &V_n[(i + 1) * s]; j = blockmix_xor(Y, V_j, X, r, 0, ctx); } } n >>= 1; /* j <-- Wrap(Integerify(X), i) */ j &= n - 1; j += N - 2 - n; V_j = &V[j * s]; /* X <-- X \xor V_j */ /* 4: X <-- H(X) */ /* 3: V_i <-- X */ Y = &V[(N - 1) * s]; j = blockmix_xor(X, V_j, Y, r, 0, ctx); /* j <-- Wrap(Integerify(X), i) */ j &= n - 1; j += N - 1 - n; V_j = &V[j * s]; /* X <-- X \xor V_j */ /* 4: X <-- H(X) */ X = XY; blockmix_xor(Y, V_j, X, r, 0, ctx); } else { /* 2: for i = 0 to N - 1 do */ for (i = 1; i < N - 1; i += 2) { /* 4: X <-- H(X) */ /* 3: V_i <-- X */ Y = &V[i * s]; blockmix_salsa8(X, Y, r); /* 4: X <-- H(X) */ /* 3: V_i <-- X */ X = &V[(i + 1) * s]; blockmix_salsa8(Y, X, r); } /* 4: X <-- H(X) */ /* 3: V_i <-- X */ Y = &V[i * s]; blockmix_salsa8(X, Y, r); /* 4: X <-- H(X) */ X = XY; blockmix_salsa8(Y, X, r); } /* B' <-- X */ for (k = 0; k < 2 * r; k++) { for (i = 0; i < 16; i++) { le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]); } } } /** * smix2(B, r, N, Nloop, flags, V, NROM, VROM, XY, ctx): * Compute second loop of B = SMix_r(B, N). The input B must be 128r bytes in * length; the temporary storage V must be 128rN bytes in length; the temporary * storage XY must be 256r bytes in length. The value N must be a power of 2 * greater than 1. The value Nloop must be even. The array V must be aligned * to a multiple of 64 bytes, and arrays B and XY to a multiple of at least 16 * bytes (aligning them to 64 bytes as well saves cache lines, but might result * in cache bank conflicts). */ static void smix2(uint8_t * B, size_t r, uint32_t N, uint64_t Nloop, yescrypt_flags_t flags, salsa20_blk_t * V, uint32_t NROM, const salsa20_blk_t * VROM, salsa20_blk_t * XY, pwxform_ctx_t * ctx) { size_t s = 2 * r; salsa20_blk_t * X = XY, * Y = &XY[s]; uint64_t i; uint32_t j; size_t k; if (Nloop == 0) return; /* X <-- B' */ /* 3: V_i <-- X */ for (k = 0; k < 2 * r; k++) { for (i = 0; i < 16; i++) { X[k].w[i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]); } } i = Nloop / 2; /* 7: j <-- Integerify(X) mod N */ j = integerify(X, r) & (N - 1); /* * Normally, VROM implies YESCRYPT_RW, but we check for these separately * because our SMix resets YESCRYPT_RW for the smix2() calls operating on the * entire V when p > 1. 
*/ if (VROM && (flags & YESCRYPT_RW)) { /* 6: for i = 0 to N - 1 do */ for (i = 0; i < Nloop; i += 2) { salsa20_blk_t * V_j = &V[j * s]; const salsa20_blk_t * VROM_j; /* 8: X <-- H(X \xor V_j) */ /* V_j <-- Xprev \xor V_j */ /* j <-- Integerify(X) mod NROM */ j = blockmix_xor_save(X, V_j, Y, r, ctx) & (NROM - 1); VROM_j = &VROM[j * s]; /* X <-- H(X \xor VROM_j) */ /* 7: j <-- Integerify(X) mod N */ j = blockmix_xor(Y, VROM_j, X, r, 1, ctx) & (N - 1); V_j = &V[j * s]; } } else if (VROM) { /* 6: for i = 0 to N - 1 do */ for (i = 0; i < Nloop; i += 2) { const salsa20_blk_t * V_j = &V[j * s]; /* 8: X <-- H(X \xor V_j) */ /* V_j <-- Xprev \xor V_j */ /* j <-- Integerify(X) mod NROM */ j = blockmix_xor(X, V_j, Y, r, 0, ctx) & (NROM - 1); V_j = &VROM[j * s]; /* X <-- H(X \xor VROM_j) */ /* 7: j <-- Integerify(X) mod N */ j = blockmix_xor(Y, V_j, X, r, 1, ctx) & (N - 1); V_j = &V[j * s]; } } else if (flags & YESCRYPT_RW) { /* 6: for i = 0 to N - 1 do */ do { salsa20_blk_t * V_j = &V[j * s]; /* 8: X <-- H(X \xor V_j) */ /* V_j <-- Xprev \xor V_j */ /* 7: j <-- Integerify(X) mod N */ j = blockmix_xor_save(X, V_j, Y, r, ctx) & (N - 1); V_j = &V[j * s]; /* 8: X <-- H(X \xor V_j) */ /* V_j <-- Xprev \xor V_j */ /* 7: j <-- Integerify(X) mod N */ j = blockmix_xor_save(Y, V_j, X, r, ctx) & (N - 1); } while (--i); } else if (ctx) { /* 6: for i = 0 to N - 1 do */ do { const salsa20_blk_t * V_j = &V[j * s]; /* 8: X <-- H(X \xor V_j) */ /* 7: j <-- Integerify(X) mod N */ j = blockmix_xor(X, V_j, Y, r, 0, ctx) & (N - 1); V_j = &V[j * s]; /* 8: X <-- H(X \xor V_j) */ /* 7: j <-- Integerify(X) mod N */ j = blockmix_xor(Y, V_j, X, r, 0, ctx) & (N - 1); } while (--i); } else { /* 6: for i = 0 to N - 1 do */ do { const salsa20_blk_t * V_j = &V[j * s]; /* 8: X <-- H(X \xor V_j) */ /* 7: j <-- Integerify(X) mod N */ j = blockmix_salsa8_xor(X, V_j, Y, r) & (N - 1); V_j = &V[j * s]; /* 8: X <-- H(X \xor V_j) */ /* 7: j <-- Integerify(X) mod N */ j = blockmix_salsa8_xor(Y, V_j, X, r) & (N - 1); } while (--i); } /* 10: B' <-- X */ for (k = 0; k < 2 * r; k++) { for (i = 0; i < 16; i++) { le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X[k].w[i]); } } } /** * p2floor(x): * Largest power of 2 not greater than argument. */ static uint64_t p2floor(uint64_t x) { uint64_t y; while ((y = x & (x - 1))) x = y; return x; } /** * smix(B, r, N, p, t, flags, V, NROM, VROM, XY, S, passwd): * Compute B = SMix_r(B, N). The input B must be 128rp bytes in length; the * temporary storage V must be 128rN bytes in length; the temporary storage XY * must be 256r or 256rp bytes in length (the larger size is required with * OpenMP-enabled builds). The value N must be a power of 2 greater than 1. * The array V must be aligned to a multiple of 64 bytes, and arrays B and * XY to a multiple of at least 16 bytes (aligning them to 64 bytes as well * saves cache lines and helps avoid false sharing in OpenMP-enabled builds * when p > 1, but it might also result in cache bank conflicts). 
*/ static void smix(uint8_t * B, size_t r, uint32_t N, uint32_t p, uint32_t t, yescrypt_flags_t flags, salsa20_blk_t * V, uint32_t NROM, const salsa20_blk_t * VROM, salsa20_blk_t * XY, uint8_t * S, uint8_t * passwd) { size_t s = 2 * r; uint32_t Nchunk; uint64_t Nloop_all, Nloop_rw; uint32_t i; /* 1: n <-- N / p */ Nchunk = N / p; /* 2: Nloop_all <-- fNloop(n, t, flags) */ Nloop_all = Nchunk; if (flags & YESCRYPT_RW) { if (t <= 1) { if (t) Nloop_all *= 2; /* 2/3 */ Nloop_all = (Nloop_all + 2) / 3; /* 1/3, round up */ } else { Nloop_all *= t - 1; } } else if (t) { if (t == 1) Nloop_all += (Nloop_all + 1) / 2; /* 1.5, round up */ Nloop_all *= t; } /* 6: Nloop_rw <-- 0 */ Nloop_rw = 0; if (flags & __YESCRYPT_INIT_SHARED) { Nloop_rw = Nloop_all; } else { /* 3: if YESCRYPT_RW flag is set */ if (flags & YESCRYPT_RW) { /* 4: Nloop_rw <-- Nloop_all / p */ Nloop_rw = Nloop_all / p; } } /* 8: n <-- n - (n mod 2) */ Nchunk &= ~(uint32_t)1; /* round down to even */ /* 9: Nloop_all <-- Nloop_all + (Nloop_all mod 2) */ Nloop_all++; Nloop_all &= ~(uint64_t)1; /* round up to even */ /* 10: Nloop_rw <-- Nloop_rw + (Nloop_rw mod 2) */ Nloop_rw++; Nloop_rw &= ~(uint64_t)1; /* round up to even */ /* 11: for i = 0 to p - 1 do */ #ifdef _OPENMP #pragma omp parallel if (p > 1) default(none) private(i) shared(B, r, N, p, flags, V, NROM, VROM, XY, S, passwd, s, Nchunk, Nloop_all, Nloop_rw) { #pragma omp for #endif for (i = 0; i < p; i++) { /* 12: u <-- in */ uint32_t Vchunk = i * Nchunk; /* 13: if i = p - 1 */ /* 14: n <-- N - u */ /* 15: end if */ /* 16: v <-- u + n - 1 */ uint32_t Np = (i < p - 1) ? Nchunk : (N - Vchunk); uint8_t * Bp = &B[128 * r * i]; salsa20_blk_t * Vp = &V[Vchunk * s]; #ifdef _OPENMP salsa20_blk_t * XYp = &XY[i * (2 * s)]; #else salsa20_blk_t * XYp = XY; #endif pwxform_ctx_t * ctx_i = NULL; /* 17: if YESCRYPT_RW flag is set */ if (flags & YESCRYPT_RW) { uint8_t *Si = S + i * Salloc; /* 18: SMix1_1(B_i, Sbytes / 128, S_i, no flags) */ smix1(Bp, 1, Sbytes / 128, 0 /* no flags */, (salsa20_blk_t *)Si, 0, NULL, XYp, NULL); ctx_i = (pwxform_ctx_t *)(Si + Sbytes); /* 19: S2_i <-- S_{i,0...2^Swidth-1} */ ctx_i->S2 = Si; /* 20: S1_i <-- S_{i,2^Swidth...2*2^Swidth-1} */ ctx_i->S1 = Si + Sbytes / 3; /* 21: S0_i <-- S_{i,2*2^Swidth...3*2^Swidth-1} */ ctx_i->S0 = Si + Sbytes / 3 * 2; /* 22: w_i <-- 0 */ ctx_i->w = 0; /* 23: if i = 0 */ if (i == 0) { /* 24: passwd <-- HMAC-SHA256(B_{0,2r-1}, passwd) */ HMAC_SHA256_CTX_Y ctx; HMAC_SHA256_Init_Y(&ctx, Bp + (128 * r - 64), 64); HMAC_SHA256_Update_Y(&ctx, passwd, 32); HMAC_SHA256_Final_Y(passwd, &ctx); } } if (!(flags & __YESCRYPT_INIT_SHARED_2)) { /* 27: SMix1_r(B_i, n, V_{u..v}, flags) */ smix1(Bp, r, Np, flags, Vp, NROM, VROM, XYp, ctx_i); } /* 28: SMix2_r(B_i, p2floor(n), Nloop_rw, V_{u..v}, flags) */ smix2(Bp, r, p2floor(Np), Nloop_rw, flags, Vp, NROM, VROM, XYp, ctx_i); } /* 30: for i = 0 to p - 1 do */ if (Nloop_all > Nloop_rw) { #ifdef _OPENMP #pragma omp for #endif for (i = 0; i < p; i++) { uint8_t * Bp = &B[128 * r * i]; #ifdef _OPENMP salsa20_blk_t * XYp = &XY[i * (2 * s)]; #else salsa20_blk_t * XYp = XY; #endif pwxform_ctx_t * ctx_i = NULL; if (flags & YESCRYPT_RW) { uint8_t *Si = S + i * Salloc; ctx_i = (pwxform_ctx_t *)(Si + Sbytes); } /* 31: SMix2_r(B_i, N, Nloop_all - Nloop_rw, V, flags excluding YESCRYPT_RW) */ smix2(Bp, r, N, Nloop_all - Nloop_rw, flags & ~YESCRYPT_RW, V, NROM, VROM, XYp, ctx_i); } } #ifdef _OPENMP } #endif } /** * yescrypt_kdf_body(shared, local, passwd, passwdlen, salt, saltlen, * N, r, p, t, flags, buf, buflen): * Compute 
scrypt(passwd[0 .. passwdlen - 1], salt[0 .. saltlen - 1], N, r, * p, buflen), or a revision of scrypt as requested by flags and shared, and * write the result into buf. The parameters r, p, and buflen must satisfy * r * p < 2^30 and buflen <= (2^32 - 1) * 32. The parameter N must be a power * of 2 greater than 1. (This optimized implementation currently additionally * limits N to the range from 8 to 2^31, but other implementation might not.) * * t controls computation time while not affecting peak memory usage. shared * and flags may request special modes as described in yescrypt.h. local is * the thread-local data structure, allowing to preserve and reuse a memory * allocation across calls, thereby reducing its overhead. * * Return 0 on success; or -1 on error. */ static int yescrypt_kdf_body(const yescrypt_shared_t * shared, yescrypt_local_t * local, const uint8_t * passwd, size_t passwdlen, const uint8_t * salt, size_t saltlen, uint64_t N, uint32_t r, uint32_t p, uint32_t t, yescrypt_flags_t flags, uint8_t * buf, size_t buflen) { yescrypt_region_t tmp; uint64_t NROM; const salsa20_blk_t * VROM; size_t B_size, V_size, XY_size, need; uint8_t * B, * S; salsa20_blk_t * V, * XY; uint8_t sha256[32]; uint8_t dk[sizeof(sha256)], * dkp = buf; /* Sanity-check parameters */ if (flags & ~YESCRYPT_KNOWN_FLAGS) { errno = EINVAL; return -1; } #if SIZE_MAX > UINT32_MAX if (buflen > (((uint64_t)(1) << 32) - 1) * 32) { errno = EFBIG; return -1; } #endif if ((uint64_t)(r) * (uint64_t)(p) >= (1 << 30)) { errno = EFBIG; return -1; } if (N > UINT32_MAX) { errno = EFBIG; return -1; } if (((N & (N - 1)) != 0) || (N <= 3) || (r < 1) || (p < 1)) { errno = EINVAL; return -1; } if ((r > SIZE_MAX / 256 / p) || (N > SIZE_MAX / 128 / r)) { errno = ENOMEM; return -1; } if (flags & YESCRYPT_RW) { if (N / p <= 3) { errno = EINVAL; return -1; } if (p > SIZE_MAX / Salloc) { errno = ENOMEM; return -1; } } #ifdef _OPENMP else if (N > SIZE_MAX / 128 / (r * p)) { errno = ENOMEM; return -1; } #endif NROM = 0; VROM = NULL; if (shared) { NROM = shared->aligned_size / ((size_t)128 * r); if (NROM > UINT32_MAX) { errno = EFBIG; return -1; } if (((NROM & (NROM - 1)) != 0) || (NROM <= 1) || !(flags & YESCRYPT_RW)) { errno = EINVAL; return -1; } VROM = shared->aligned; } /* Allocate memory */ V = NULL; V_size = (size_t)128 * r * N; #ifdef _OPENMP if (!(flags & YESCRYPT_RW)) V_size *= p; #endif need = V_size; if (flags & __YESCRYPT_INIT_SHARED) { if (local->aligned_size < need) { if (local->base || local->aligned || local->base_size || local->aligned_size) { errno = EINVAL; return -1; } if (!alloc_region(local, need)) return -1; } V = (salsa20_blk_t *)local->aligned; need = 0; } B_size = (size_t)128 * r * p; need += B_size; if (need < B_size) { errno = ENOMEM; return -1; } XY_size = (size_t)256 * r; #ifdef _OPENMP XY_size *= p; #endif need += XY_size; if (need < XY_size) { errno = ENOMEM; return -1; } if (flags & YESCRYPT_RW) { size_t S_size = (size_t)Salloc * p; need += S_size; if (need < S_size) { errno = ENOMEM; return -1; } } if (flags & __YESCRYPT_INIT_SHARED) { if (!alloc_region(&tmp, need)) return -1; B = (uint8_t *)tmp.aligned; XY = (salsa20_blk_t *)((uint8_t *)B + B_size); } else { init_region(&tmp); if (local->aligned_size < need) { if (free_region(local)) return -1; if (!alloc_region(local, need)) return -1; } B = (uint8_t *)local->aligned; V = (salsa20_blk_t *)((uint8_t *)B + B_size); XY = (salsa20_blk_t *)((uint8_t *)V + V_size); } S = NULL; if (flags & YESCRYPT_RW) S = (uint8_t *)XY + XY_size; if (flags) { 
HMAC_SHA256_CTX_Y ctx; HMAC_SHA256_Init_Y(&ctx, "yescrypt-prehash", (flags & __YESCRYPT_PREHASH) ? 16 : 8); HMAC_SHA256_Update_Y(&ctx, passwd, passwdlen); HMAC_SHA256_Final_Y(sha256, &ctx); passwd = sha256; passwdlen = sizeof(sha256); } /* 1: (B_0 ... B_{p-1}) <-- PBKDF2(P, S, 1, p * MFLen) */ PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, 1, B, B_size); if (t || flags) memcpy(sha256, B, sizeof(sha256)); if (p == 1 || (flags & YESCRYPT_RW)) { smix(B, r, N, p, t, flags, V, NROM, VROM, XY, S, sha256); } else { uint32_t i; /* 2: for i = 0 to p - 1 do */ #ifdef _OPENMP #pragma omp parallel for default(none) private(i) shared(B, r, N, p, t, flags, V, NROM, VROM, XY, S) #endif for (i = 0; i < p; i++) { /* 3: B_i <-- MF(B_i, N) */ #ifdef _OPENMP smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, &V[(size_t)2 * r * i * N], NROM, VROM, &XY[(size_t)4 * r * i], NULL, NULL); #else smix(&B[(size_t)128 * r * i], r, N, 1, t, flags, V, NROM, VROM, XY, NULL, NULL); #endif } } dkp = buf; if (flags && buflen < sizeof(dk)) { PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, dk, sizeof(dk)); dkp = dk; } /* 5: DK <-- PBKDF2(P, B, 1, dkLen) */ PBKDF2_SHA256(passwd, passwdlen, B, B_size, 1, buf, buflen); /* * Except when computing classic scrypt, allow all computation so far * to be performed on the client. The final steps below match those of * SCRAM (RFC 5802), so that an extension of SCRAM (with the steps so * far in place of SCRAM's use of PBKDF2 and with SHA-256 in place of * SCRAM's use of SHA-1) would be usable with yescrypt hashes. */ if (flags && !(flags & __YESCRYPT_PREHASH)) { /* Compute ClientKey */ { HMAC_SHA256_CTX_Y ctx; HMAC_SHA256_Init_Y(&ctx, dkp, sizeof(dk)); HMAC_SHA256_Update_Y(&ctx, "Client Key", 10); HMAC_SHA256_Final_Y(sha256, &ctx); } /* Compute StoredKey */ { SHA256_CTX_Y ctx; size_t clen = buflen; if (clen > sizeof(dk)) clen = sizeof(dk); SHA256_Init_Y(&ctx); SHA256_Update_Y(&ctx, sha256, sizeof(sha256)); SHA256_Final_Y(dk, &ctx); memcpy(buf, dk, clen); } } if (free_region(&tmp)) return -1; /* Success! */ return 0; } /** * yescrypt_kdf(shared, local, passwd, passwdlen, salt, saltlen, * N, r, p, t, g, flags, buf, buflen): * Compute scrypt or its revision as requested by the parameters. The inputs * to this function are the same as those for yescrypt_kdf_body() above, with * the addition of g, which controls hash upgrades (0 for no upgrades so far). */ int yescrypt_kdf(const yescrypt_shared_t * shared, yescrypt_local_t * local, const uint8_t * passwd, size_t passwdlen, const uint8_t * salt, size_t saltlen, uint64_t N, uint32_t r, uint32_t p, uint32_t t, uint32_t g, yescrypt_flags_t flags, uint8_t * buf, size_t buflen) { uint8_t dk[32]; if ((flags & (YESCRYPT_RW | __YESCRYPT_INIT_SHARED)) == YESCRYPT_RW && p >= 1 && N / p >= 0x100 && N / p * r >= 0x20000) { int retval = yescrypt_kdf_body(shared, local, passwd, passwdlen, salt, saltlen, N >> 6, r, p, 0, flags | __YESCRYPT_PREHASH, dk, sizeof(dk)); if (retval) return retval; passwd = dk; passwdlen = sizeof(dk); } do { uint8_t * dkp = g ? dk : buf; size_t dklen = g ? sizeof(dk) : buflen; int retval = yescrypt_kdf_body(shared, local, passwd, passwdlen, salt, saltlen, N, r, p, t, flags, dkp, dklen); if (retval) return retval; passwd = dkp; passwdlen = dklen; N <<= 2; if (!N) return -1; t >>= 1; } while (g--); return 0; }
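/*
 * Usage sketch (illustrative, not part of the yescrypt sources above): a
 * minimal call into the exported yescrypt_kdf() entry point.  It assumes
 * yescrypt_init_local(), yescrypt_free_local() and the YESCRYPT_RW flag are
 * declared in yescrypt.h; the password, salt and cost settings are examples
 * only.  Kept under #if 0 so it does not affect the build.
 */
#if 0
static int yescrypt_kdf_example(void)
{
	yescrypt_local_t local;
	uint8_t dk[32];
	int rc;

	if (yescrypt_init_local(&local))
		return -1;

	/* native yescrypt mode: N=4096, r=8, p=1, t=0, g=0, flags=YESCRYPT_RW */
	rc = yescrypt_kdf(NULL, &local,
	    (const uint8_t *)"pleaseletmein", 13,
	    (const uint8_t *)"SodiumChloride", 14,
	    4096, 8, 1, 0, 0, YESCRYPT_RW, dk, sizeof(dk));

	yescrypt_free_local(&local);
	return rc; /* 0 on success, -1 on error */
}
#endif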
reduce_mean.h
// Copyright 2018 Xiaomi, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_KERNELS_REDUCE_MEAN_H_ #define MACE_KERNELS_REDUCE_MEAN_H_ #if defined(MACE_ENABLE_NEON) && defined(__aarch64__) #include <arm_neon.h> #endif #include <algorithm> #include <memory> #include <vector> #include "mace/core/future.h" #include "mace/core/tensor.h" #include "mace/kernels/kernel.h" namespace mace { namespace kernels { template <DeviceType D, typename T> struct ReduceMeanFunctor : OpKernel { ReduceMeanFunctor(OpKernelContext *context, const std::vector<int> &axis, const bool keep_dims) : OpKernel(context), axis_(axis), keep_dims_(keep_dims) {} void Simplify(const Tensor *input) { std::vector<bool> bitmap(static_cast<uint32_t>(input->dim_size()), false); if (axis_.size() == 0) { for (int i = 0; i < input->dim_size(); ++i) { bitmap[i] = true; } } else { for (unsigned int i = 0; i < axis_.size(); ++i) { const int index = axis_[i] >= 0 ? axis_[i] : axis_[i] + input->dim_size(); bitmap[index] = true; } } out_shape_.clear(); for (unsigned int i = 0; i < input->dim_size(); ++i) { if (!bitmap[i]) { out_shape_.push_back(input->dim(i)); } else if (keep_dims_) { out_shape_.push_back(1); } } data_reshape_.clear(); unsigned int dim_index = 0; for (; dim_index < input->dim_size(); ++dim_index) { if (input->dim(dim_index) != 1) break; } if (dim_index >= input->dim_size()) { reduce_first_axis_ = true; } else { reduce_first_axis_ = bitmap[dim_index]; data_reshape_.push_back(input->dim(dim_index)); ++dim_index; for (; dim_index < input->dim_size(); ++dim_index) { const int n = input->dim(dim_index); if (n == 1) { bitmap[dim_index] = bitmap[dim_index - 1]; } if (bitmap[dim_index-1] != bitmap[dim_index]) { data_reshape_.push_back(n); } else { data_reshape_.back() *= n; } } } } void Compute(const Tensor *input, Tensor *output) { Tensor::MappingGuard input_mapper(input); const T *input_ptr = input->data<T>(); Tensor::MappingGuard output_map(output); T *output_ptr = output->mutable_data<T>(); memset(output_ptr, 0, output->size() * sizeof(T)); switch (data_reshape_.size()) { case 1: if (reduce_first_axis_) { T sum = 0; for (int i = 0; i < data_reshape_[0]; ++i) { sum = sum + input_ptr[i]; } output_ptr[0] = sum / data_reshape_[0]; } else { #pragma omp parallel for for (int i = 0; i < data_reshape_[0]; ++i) { output_ptr[i] = input_ptr[i]; } } break; case 2: if (reduce_first_axis_) { #pragma omp parallel for for (int i = 0; i < data_reshape_[1]; ++i) { for (int j = 0; j < data_reshape_[0]; ++j) { output_ptr[i] += input_ptr[j * data_reshape_[1] + i]; } output_ptr[i] /= data_reshape_[0]; } } else { #pragma omp parallel for for (int i = 0; i < data_reshape_[0]; ++i) { for (int j = 0; j < data_reshape_[1]; ++j) { output_ptr[i] += input_ptr[i * data_reshape_[1] + j]; } output_ptr[i] /= data_reshape_[1]; } } break; case 3: if (reduce_first_axis_) { #pragma omp parallel for for (int i = 0; i < data_reshape_[1]; ++i) { for (int j = 0; j < data_reshape_[2]; ++j) { for (int k = 0; k < data_reshape_[0]; ++k) { 
output_ptr[i] += input_ptr[(k * data_reshape_[1] + i) * data_reshape_[2] + j]; } } output_ptr[i] /= (data_reshape_[0] * data_reshape_[2]); } } else { #pragma omp parallel for collapse(2) for (int i = 0; i < data_reshape_[0]; ++i) { for (int j = 0; j < data_reshape_[2]; ++j) { for (int k = 0; k < data_reshape_[1]; ++k) { output_ptr[i * data_reshape_[2] + j] += input_ptr[(i * data_reshape_[1] + k) * data_reshape_[2] + j]; } output_ptr[i * data_reshape_[2] + j] /= data_reshape_[1]; } } } break; case 4: if (reduce_first_axis_) { #pragma omp parallel for collapse(2) for (int i = 0; i < data_reshape_[1]; ++i) { for (int j = 0; j < data_reshape_[3]; ++j) { for (int k = 0; k < data_reshape_[2]; ++k) { for (int t = 0; t < data_reshape_[0]; ++t) { output_ptr[i * data_reshape_[3] + j] += input_ptr[((t * data_reshape_[1] + i) * data_reshape_[2] + k)*data_reshape_[3] + j]; } } output_ptr[i * data_reshape_[3] + j] /= (data_reshape_[0] * data_reshape_[2]); } } } else { #pragma omp parallel for collapse(2) for (int i = 0; i < data_reshape_[0]; ++i) { for (int j = 0; j < data_reshape_[2]; ++j) { for (int k = 0; k < data_reshape_[1]; ++k) { for (int t = 0; t < data_reshape_[3]; ++t) { output_ptr[i * data_reshape_[2] + j] += input_ptr[((i * data_reshape_[1] + k) * data_reshape_[2] + j)*data_reshape_[3] + t]; } } output_ptr[i * data_reshape_[2] + j] /= (data_reshape_[1] * data_reshape_[3]); } } } break; default: MACE_CHECK(false, "not implemented in mace") << "data reshape size" << data_reshape_.size() << "reduce first axis:" << reduce_first_axis_; break; } } MaceStatus operator()(const Tensor *input, Tensor *output, StatsFuture *future) { MACE_UNUSED(future); Simplify(input); output->Resize(out_shape_); Compute(input, output); return MACE_SUCCESS; } const std::vector<int> axis_; bool keep_dims_; bool reduce_first_axis_; std::vector<int> data_reshape_; std::vector<index_t> out_shape_; }; #ifdef MACE_ENABLE_OPENCL class OpenCLReduceMeanKernel { public: virtual MaceStatus Compute( OpKernelContext *context, const Tensor *input, Tensor *output, StatsFuture *future) = 0; MACE_VIRTUAL_EMPTY_DESTRUCTOR(OpenCLReduceMeanKernel); }; template <typename T> struct ReduceMeanFunctor<DeviceType::GPU, T> : OpKernel { ReduceMeanFunctor(OpKernelContext *context, const std::vector<int> &axis, const bool keep_dims); MaceStatus operator()(const Tensor *input, Tensor *output, StatsFuture *future); std::unique_ptr<OpenCLReduceMeanKernel> kernel_; }; #endif } // namespace kernels } // namespace mace #endif // MACE_KERNELS_REDUCE_MEAN_H_
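// Illustrative sketch (not part of the MACE headers above): the core idea of
// ReduceMeanFunctor::Simplify() is to merge adjacent dimensions that are all
// reduced or all kept, so that Compute() only ever has to handle 1-4
// collapsed dimensions.  The standalone helper below mirrors that idea on
// plain vectors; the name and the handling of size-1 dimensions are
// simplified assumptions, not MACE API.
#if 0
#include <cstdint>
#include <vector>

static std::vector<int64_t> CollapseDims(const std::vector<int64_t> &dims,
                                         const std::vector<bool> &reduced,
                                         bool *reduce_first_axis) {
  std::vector<int64_t> collapsed;
  size_t d = 0;
  while (d < dims.size() && dims[d] == 1) ++d;  // skip leading size-1 dims
  if (d == dims.size()) {
    *reduce_first_axis = true;
    return collapsed;
  }
  *reduce_first_axis = reduced[d];
  collapsed.push_back(dims[d]);
  bool prev = reduced[d];
  for (++d; d < dims.size(); ++d) {
    if (dims[d] == 1) continue;                 // size-1 dims merge freely
    if (reduced[d] == prev) {
      collapsed.back() *= dims[d];              // same reduce/keep state: merge
    } else {
      collapsed.push_back(dims[d]);             // state flips: start a new group
      prev = reduced[d];
    }
  }
  return collapsed;
}

// Example: an NHWC input {2, 8, 8, 16} reduced over axes {1, 2} collapses to
// {2, 64, 16} with *reduce_first_axis == false, i.e. the "case 3" branch of
// Compute() above.
#endif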
test_loops_omp.c
/** @file class.c * Julien Lesgourgues, 17.04.2011 */ /* this main calls CLASS several times in a loop, with different input parameters. It illustrates how the code could be interfaced with a parameter extraction code. */ /* JL 17.03.2016: implemented here nested openMP loops. The user chooses how many instances of CLASS are run in parallel (by setting the variable number_of_class_instances below). Each of them uses a number of thread such that all cores are used. */ #include "class.h" int class( struct file_content *pfc, struct precision * ppr, struct background * pba, struct thermo * pth, struct perturbs * ppt, struct primordial * ppm, struct nonlinear * pnl, struct transfers * ptr, struct spectra * psp, struct lensing * ple, struct output * pop, int l_max, double ** cl, ErrorMsg errmsg) { int l; class_call(input_init(pfc,ppr,pba,pth,ppt,ptr,ppm,psp,pnl,ple,pop,errmsg), errmsg, errmsg); class_call(background_init(ppr,pba), pba->error_message, errmsg); class_call(thermodynamics_init(ppr,pba,pth), pth->error_message, errmsg); class_call(perturb_init(ppr,pba,pth,ppt), ppt->error_message, errmsg); class_call(primordial_init(ppr,ppt,ppm), ppm->error_message, errmsg); class_call(nonlinear_init(ppr,pba,pth,ppt,ppm,pnl), pnl->error_message, errmsg); class_call(transfer_init(ppr,pba,pth,ppt,pnl,ptr), ptr->error_message, errmsg); class_call(spectra_init(ppr,pba,ppt,ppm,pnl,ptr,psp), psp->error_message, errmsg); class_call(lensing_init(ppr,ppt,psp,pnl,ple), ple->error_message, errmsg); /****** write the Cl values in the input array cl[l] *******/ for (l=2; l <= l_max; l++) { class_call(output_total_cl_at_l(psp,ple,pop,(double)l,cl[l]), psp->error_message, errmsg); } /****** all calculations done, now free the structures ******/ class_call(lensing_free(ple), ple->error_message, errmsg); class_call(spectra_free(psp), psp->error_message, errmsg); class_call(transfer_free(ptr), ptr->error_message, errmsg); class_call(nonlinear_free(pnl), pnl->error_message, errmsg); class_call(primordial_free(ppm), ppm->error_message, errmsg); class_call(perturb_free(ppt), ppt->error_message, errmsg); class_call(thermodynamics_free(pth), pth->error_message, errmsg); class_call(background_free(pba), pba->error_message, errmsg); return _SUCCESS_; } int main() { /* shared variable that will be common to all CLASS instances */ int i; int l,l_max; int num_ct_max=7; int num_loops=10; struct file_content fc; ErrorMsg errmsg_parser; int total_number_of_threads; int number_of_class_instances; int number_of_threads_inside_class; int index_ct_tt; int index_ct_ee; int index_ct_te; /* dealing with the openMP part (number of instances, number of threads per instance...) 
*/

#ifdef _OPENMP
  /* Determine the number of threads, class instances and nested threads */
  total_number_of_threads = omp_get_max_threads();

  /* User-fixed number of CLASS instances to be run in parallel
     (the total number of threads should be divisible by this number) */
  number_of_class_instances=2;
  if ((total_number_of_threads % number_of_class_instances) != 0)
    printf("The total number of threads, %d, is not a multiple of the requested number of CLASS instances, %d\n",
           total_number_of_threads,number_of_class_instances);

  number_of_threads_inside_class = total_number_of_threads/number_of_class_instances; /* inferred number of threads per instance */

  printf("# Total number of available threads = %d, used to run\n", total_number_of_threads);
  printf("# -> %d CLASS executables in parallel\n", number_of_class_instances);
  printf("# -> %d threads inside each CLASS executable\n", number_of_threads_inside_class);

  /* Turn on nested parallelism */
  omp_set_nested(1);
#endif

  /* choose a value of l_max in C_l's */
  l_max=3000;

  /* all parameters for which we don't want to keep default values
     should be passed to the code through a file_content structure.
     Create such a structure with the size you need: 10 in this example */
  parser_init(&fc,10,"",errmsg_parser);

  /* assign values to these 10 parameters. Some will be fixed, some
     will be varied in the loop. */
  strcpy(fc.name[0],"output");
  strcpy(fc.value[0],"tCl,pCl,lCl");

  strcpy(fc.name[1],"l_max_scalars");
  sprintf(fc.value[1],"%d",l_max);

  strcpy(fc.name[2],"lensing");
  sprintf(fc.value[2],"yes");

  strcpy(fc.name[3],"H0");
  sprintf(fc.value[3],"%e",72.);

  strcpy(fc.name[4],"omega_b");
  sprintf(fc.value[4],"%e",0.024);

  strcpy(fc.name[5],"omega_cdm");
  sprintf(fc.value[5],"%e",0.05);

  strcpy(fc.name[6],"z_reio");
  sprintf(fc.value[6],"%e",10.);

  strcpy(fc.name[7],"A_s");
  sprintf(fc.value[7],"%e",2.3e-9);

  strcpy(fc.name[8],"n_s");
  sprintf(fc.value[8],"%e",1.);

  strcpy(fc.name[9],"perturbations_verbose");
  sprintf(fc.value[9],"%d",0); // Trick: set to 2 to cross-check actual number of threads per CLASS instance

  /* Create an array of Cl's where all results will be stored for each
     parameter value in the loop */
  double *** cl;
  cl = malloc(num_loops*sizeof(double**));

  /* Create one thread for each instance of CLASS */
#pragma omp parallel num_threads(number_of_class_instances)
  {
    /* set the number of threads inside each CLASS instance */
#ifdef _OPENMP
    omp_set_num_threads(number_of_threads_inside_class);
#endif

    /* for each thread/instance, create all CLASS input/output structures
       (these variables are being declared inside the parallel zone,
       hence they are openMP private variables) */
    struct precision pr;        /* for precision parameters */
    struct background ba;       /* for cosmological background */
    struct thermo th;           /* for thermodynamics */
    struct perturbs pt;         /* for source functions */
    struct transfers tr;        /* for transfer functions */
    struct primordial pm;       /* for primordial spectra */
    struct spectra sp;          /* for output spectra */
    struct nonlinear nl;        /* for non-linear spectra */
    struct lensing le;          /* for lensed spectra */
    struct output op;           /* for output files */
    ErrorMsg errmsg;            /* for error messages */

    struct file_content fc_local;
    int j,iam;

    /* copy the shared file content into the local file content used by
       each instance */
    parser_init(&fc_local,fc.size,"",errmsg);
    for (j=0; j < fc.size; j++) {
      strcpy(fc_local.value[j],fc.value[j]);
      strcpy(fc_local.name[j],fc.name[j]);
      fc_local.read[j]=fc.read[j];
    }

    /* loop over (num_loops) values of some parameters: in this example, omega_b */
#pragma omp for schedule(static,1)
    for (i = 0; i < num_loops; i++) {

#ifdef _OPENMP
      iam=omp_get_thread_num();
#else
      iam=0;
#endif

      /* assign one value to omega_b */
      sprintf(fc_local.value[4],"%e",0.01+i*0.002);
      printf("# %d\tthread=%d : running with omega_b = %s\n",i,iam,fc_local.value[4]);

      /* allocate the array where the Cl's calculated by one instance will
         be written (we could add another array with P(k), or extract other
         results from the code - here we assume that we are interested in
         the C_l's only) */
      cl[i]=malloc((l_max+1)*sizeof(double*));
      for (l=0;l<=l_max;l++)
        cl[i][l]=malloc(num_ct_max*sizeof(double));

      /* call CLASS and return the C_l's */
      if (class(&fc_local,&pr,&ba,&th,&pt,&pm,&nl,&tr,&sp,&le,&op,l_max,cl[i],errmsg) == _FAILURE_) {
        printf("\n\nError in class \n=>%s\n",errmsg);
        //return _FAILURE_;
      }

      /* if this is the first call, extract dynamically the value of the
         indices used in the output */
      if ((i==0) && (iam==0)) {
        index_ct_tt=sp.index_ct_tt;
        index_ct_te=sp.index_ct_te;
        index_ct_ee=sp.index_ct_ee;
      }

    } // end of loop over parameters

  } // end parallel zone

  /* write in file the lensed C_l^TT, C_l^EE, C_l^TE's obtained in all runs */
  FILE * out=fopen("output/test_loops_omp.dat","w");

  for (i=0; i<num_loops; i++) {
    for (l=2;l<=l_max;l++) {
      fprintf(out,"%d %e %e %e\n",
              l,
              l*(l+1)*cl[i][l][index_ct_tt],
              l*(l+1)*cl[i][l][index_ct_ee],
              l*(l+1)*cl[i][l][index_ct_te]);
    }
    fprintf(out,"\n");
  }

  /* free Cl's array */
  for (i=0; i<num_loops; i++) {
    for (l=0;l<=l_max;l++) {
      free(cl[i][l]);
    }
    free(cl[i]);
  }
  free(cl);

  return _SUCCESS_;

}
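/*
 * Illustrative side note (not part of the CLASS sources above): the nested
 * layout used in main() -- an outer team of CLASS instances, each spawning
 * its own inner team -- relies on omp_set_nested(), which is deprecated
 * since OpenMP 5.0.  The self-contained sketch below shows the same
 * two-level layout with omp_set_max_active_levels(); the instance count is
 * an arbitrary example and the snippet is independent of CLASS.  Kept under
 * #if 0 so it does not affect the build.
 */
#if 0
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

static void nested_layout_demo(void) {
#ifdef _OPENMP
  int instances = 2;                                    /* outer team size */
  int per_instance = omp_get_max_threads() / instances; /* inner team size */

  omp_set_max_active_levels(2);   /* allow two levels of nested parallelism */

#pragma omp parallel num_threads(instances)
  {
    omp_set_num_threads(per_instance);
#pragma omp parallel
    {
#pragma omp critical
      printf("instance %d / thread %d\n",
             omp_get_ancestor_thread_num(1), omp_get_thread_num());
    }
  }
#endif
}
#endif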
GB_binop__bclr_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bclr_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__bclr_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__bclr_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__bclr_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_uint16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bclr_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__bclr_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_uint16) // C=scalar+B GB (_bind1st__bclr_uint16) // C=scalar+B' GB (_bind1st_tran__bclr_uint16) // C=A+scalar GB (_bind2nd__bclr_uint16) // C=A'+scalar GB (_bind2nd_tran__bclr_uint16) // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = GB_BITCLR (aij, bij, uint16_t, 16) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITCLR (x, y, uint16_t, 16) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BCLR || GxB_NO_UINT16 || GxB_NO_BCLR_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bclr_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bclr_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bclr_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bclr_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; 
return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bclr_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bclr_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bclr_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bclr_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bclr_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITCLR (x, bij, uint16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bclr_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITCLR (aij, y, uint16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITCLR (x, aij, uint16_t, 16) ; \ } GrB_Info GB (_bind1st_tran__bclr_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, 
int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITCLR (aij, y, uint16_t, 16) ; \ } GrB_Info GB (_bind2nd_tran__bclr_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
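/*
 * Usage sketch (illustrative, not part of the generated file above): these
 * kernels back the built-in GxB_BCLR_UINT16 binary operator.  One user-level
 * call that eventually dispatches into the GB (_AemultB_*__bclr_uint16)
 * kernels is an element-wise multiply with that operator; matrix creation
 * and error handling are elided, and the snippet is inert under #if 0.
 */
#if 0
#include "GraphBLAS.h"

GrB_Info bclr_uint16_demo (GrB_Matrix C, GrB_Matrix A, GrB_Matrix B)
{
    // C(i,j) = bitclr (A(i,j), B(i,j)) on the intersection pattern of A and B
    return (GrB_Matrix_eWiseMult_BinaryOp (C, NULL, NULL,
        GxB_BCLR_UINT16, A, B, NULL)) ;
}
#endif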
GB_unaryop__identity_uint16_uint16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint16_uint16 // op(A') function: GB_tran__identity_uint16_uint16 // C type: uint16_t // A type: uint16_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = aij #define GB_ATYPE \ uint16_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint16_t z = (uint16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint16_uint16 ( uint16_t *restrict Cx, const uint16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint16_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
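/*
 * Usage sketch (illustrative, not part of the generated file above): the
 * kernels in this file implement the entrywise identity/typecast apply for
 * uint16.  At user level this code path is reached through GrB_apply with
 * the built-in identity operator; matrix creation and error handling are
 * elided, and the snippet is inert under #if 0.
 */
#if 0
#include "GraphBLAS.h"

GrB_Info identity_uint16_demo (GrB_Matrix C, GrB_Matrix A)
{
    // C = (uint16) A, an entrywise copy/typecast via the identity unary op
    return (GrB_Matrix_apply (C, NULL, NULL, GrB_IDENTITY_UINT16, A, NULL)) ;
}
#endif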
kernels.c
/*************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #include "common.h" #include "kernels.h" void cpu_stencil(float c0,float c1, float *A0,float *Anext,const int nx, const int ny, const int nz) { int i, j, k; int size=nx*ny*nz; #pragma omp target map(alloc:A0[0:size], Anext[0:size]) #ifdef SPEC_USE_INNER_SIMD #pragma omp teams distribute parallel for collapse(2) #else #pragma omp teams distribute parallel for simd collapse(3) #endif for(k=1;k<nz-1;k++) { for(j=1;j<ny-1;j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for(i=1;i<nx-1;i++) { Anext[Index3D (nx, ny, i, j, k)] = (A0[Index3D (nx, ny, i, j, k + 1)] + A0[Index3D (nx, ny, i, j, k - 1)] + A0[Index3D (nx, ny, i, j + 1, k)] + A0[Index3D (nx, ny, i, j - 1, k)] + A0[Index3D (nx, ny, i + 1, j, k)] + A0[Index3D (nx, ny, i - 1, j, k)])*c1 - A0[Index3D (nx, ny, i, j, k)]*c0; } } } }
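/*
 * Illustrative driver sketch (not part of the original benchmark file): how
 * cpu_stencil() above is typically iterated, ping-ponging the two grids.
 * The target construct above uses map(alloc: ...), which allocates device
 * storage without copying, so it assumes the arrays were already mapped and
 * initialized on the device elsewhere; the enter/exit data directives and
 * the iteration count below are assumptions, not code from the benchmark.
 * Kept under #if 0 so it does not affect the build.
 */
#if 0
#include "common.h"
#include "kernels.h"

static void run_stencil(float c0, float c1, float *A0, float *Anext,
                        int nx, int ny, int nz, int iters)
{
  const int size = nx * ny * nz;

  /* copy both grids to the device once; the map(alloc:) inside cpu_stencil
     then finds them present and reuses the device copies */
  #pragma omp target enter data map(to: A0[0:size], Anext[0:size])
  for (int it = 0; it < iters; it++) {
    cpu_stencil(c0, c1, A0, Anext, nx, ny, nz);
    float *t = A0; A0 = Anext; Anext = t;   /* swap grids for the next step */
  }
  /* after the loop, A0 points at the grid holding the latest time step */
  #pragma omp target exit data map(from: A0[0:size], Anext[0:size])
}
#endif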
IndexSort.h
/** * @file * This file is part of XdmfWriter * * @author Sebastian Rettenberger <[email protected]> * * @copyright Copyright (c) 2017, Technische Universitaet Muenchen. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifndef XDMFWRITER_INDEXSORT_H #define XDMFWRITER_INDEXSORT_H #include <algorithm> namespace xdmfwriter { namespace internal { enum SortType { STABLE, UNSTABLE }; /** * Compares 3D-vertex indices according to the vertices */ template<typename T> class IndexedVertexComparator { private: const T *m_vertices; public: IndexedVertexComparator(const T *vertices) : m_vertices(vertices) { } bool operator() (unsigned int i, unsigned int j) { i *= 3; j *= 3; return (m_vertices[i] < m_vertices[j]) || (m_vertices[i] == m_vertices[j] && m_vertices[i+1] < m_vertices[j+1]) || (m_vertices[i] == m_vertices[j] && m_vertices[i+1] == m_vertices[j+1] && m_vertices[i+2] < m_vertices[j+2]); } }; template<SortType, typename T> struct RunSort { void operator()(unsigned int* first, unsigned int* last, const IndexedVertexComparator<T> &comparator); }; template<typename T> struct RunSort<STABLE, T> { void operator()(unsigned int* first, unsigned int* last, const IndexedVertexComparator<T> &comparator) { std::stable_sort(first, last, comparator); } }; template<typename T> struct RunSort<UNSTABLE, T> { void operator()(unsigned int* first, unsigned int* last, const IndexedVertexComparator<T> &comparator) { std::sort(first, last, comparator); } }; /** * Creates a sorted index */ template<SortType S, typename T> class IndexSort { public: static void sort(const T *vertices, unsigned int numVertices, unsigned int *sortedIndices) { #ifdef _OPENMP #pragma omp parallel for schedule(static) #endif for (unsigned int i = 0; i < numVertices; i++) sortedIndices[i] = i; IndexedVertexComparator<T> comparator(vertices); RunSort<S, T> runner; runner(sortedIndices, sortedIndices+numVertices, comparator); } }; } } #endif // XDMFWRITER_INDEXSORT_H
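// Usage sketch (illustrative, not part of the header above): building a
// sorted index over packed x,y,z vertex coordinates with IndexSort.  The
// vertex data and the demo function name are examples only; the call uses
// xdmfwriter::internal::IndexSort exactly as declared above.  Kept under
// #if 0 so it does not affect the build.
#if 0
#include <vector>

void indexsort_demo()
{
  // four vertices, packed as x,y,z triples
  const double vertices[] = { 1,0,0,  0,0,0,  0,1,0,  0,0,1 };
  const unsigned int numVertices = 4;

  std::vector<unsigned int> sorted(numVertices);
  xdmfwriter::internal::IndexSort<xdmfwriter::internal::STABLE, double>
      ::sort(vertices, numVertices, sorted.data());

  // sorted now lists vertex ids ordered lexicographically by (x, y, z):
  // here 1 (0,0,0), 3 (0,0,1), 2 (0,1,0), 0 (1,0,0)
}
#endif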
attribute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/effect.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/identify.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/magick.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/segment.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/transform.h" #include "magick/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. % % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport RectangleInfo GetImageBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; MagickPixelPacket target[3], zero; RectangleInfo bounds; register const PixelPacket *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); bounds.width=0; bounds.height=0; bounds.x=(ssize_t) image->columns; bounds.y=(ssize_t) image->rows; GetMagickPixelPacket(image,&target[0]); image_view=AcquireVirtualCacheView(image,exception); p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); if (p == (const PixelPacket *) NULL) { image_view=DestroyCacheView(image_view); return(bounds); } SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view), &target[0]); GetMagickPixelPacket(image,&target[1]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); if (p != (const PixelPacket *) NULL) SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view), &target[1]); GetMagickPixelPacket(image,&target[2]); p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); if (p != (const PixelPacket *) NULL) SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view), &target[2]); status=MagickTrue; GetMagickPixelPacket(image,&zero); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; RectangleInfo bounding_box; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif bounding_box=bounds; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); if ((x < bounding_box.x) && (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse)) bounding_box.x=x; if ((x > (ssize_t) bounding_box.width) && (IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse)) bounding_box.width=(size_t) x; if ((y < bounding_box.y) && (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse)) bounding_box.y=y; if ((y > (ssize_t) bounding_box.height) && (IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse)) bounding_box.height=(size_t) y; p++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif { if (bounding_box.x < bounds.x) bounds.x=bounding_box.x; if (bounding_box.y < bounds.y) bounds.y=bounding_box.y; if (bounding_box.width > bounds.width) bounds.width=bounding_box.width; if (bounding_box.height > bounds.height) bounds.height=bounding_box.height; } } image_view=DestroyCacheView(image_view); if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); else { bounds.width-=(bounds.x-1); bounds.height-=(bounds.y-1); } return(bounds); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l D e p t h % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelDepth() returns the depth of a particular image channel. % % The format of the GetImageChannelDepth method is: % % size_t GetImageDepth(const Image *image,ExceptionInfo *exception) % size_t GetImageChannelDepth(const Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception) { return(GetImageChannelDepth(image,CompositeChannels,exception)); } MagickExport size_t GetImageChannelDepth(const Image *image, const ChannelType channel,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t *current_depth, depth, number_threads; ssize_t y; /* Compute image depth. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); current_depth=(size_t *) AcquireQuantumMemory(number_threads, sizeof(*current_depth)); if (current_depth == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); status=MagickTrue; for (i=0; i < (ssize_t) number_threads; i++) current_depth[i]=1; if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse)) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { const int id = GetOpenMPThreadId(); while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((channel & RedChannel) != 0) if (IsPixelAtDepth(image->colormap[i].red,range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0)) if (IsPixelAtDepth(image->colormap[i].green,range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0)) if (IsPixelAtDepth(image->colormap[i].blue,range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse)) break; current_depth[id]++; } } depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } image_view=AcquireVirtualCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) DisableMSCWarning(4127) if (1UL*QuantumRange <= MaxMap) RestoreMSCWarning { size_t *depth_map; /* Scale pixels to desired (optimized with depth map). 
*/ depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) { unsigned int depth; for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++) { Quantum pixel; QuantumAny range; range=GetQuantumRange(depth); pixel=(Quantum) i; if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range)) break; } depth_map[i]=depth; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { Quantum pixel; if ((channel & RedChannel) != 0) { pixel=GetPixelRed(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if ((channel & GreenChannel) != 0) { pixel=GetPixelGreen(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if ((channel & BlueChannel) != 0) { pixel=GetPixelBlue(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) { pixel=GetPixelOpacity(p); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) { pixel=GetPixelIndex(indexes+x); if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(pixel)]; } p++; } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; depth_map=(size_t *) RelinquishMagickMemory(depth_map); current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } #endif #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((atDepth != MagickFalse) && ((channel & RedChannel) != 0)) if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0)) if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != 
            MagickFalse) && ((channel & BlueChannel) != 0))
          if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse)
            atDepth=MagickFalse;
        if ((atDepth != MagickFalse) && ((channel & OpacityChannel) != 0) &&
            (image->matte != MagickFalse))
          if (IsPixelAtDepth(GetPixelOpacity(p),range) == MagickFalse)
            atDepth=MagickFalse;
        if ((atDepth != MagickFalse) && ((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse)
            atDepth=MagickFalse;
        if ((atDepth != MagickFalse))
          break;
        current_depth[id]++;
      }
      p++;
    }
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  depth=current_depth[0];
  for (i=1; i < (ssize_t) number_threads; i++)
    if (depth < current_depth[i])
      depth=current_depth[i];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e Q u a n t u m D e p t h                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantumDepth() returns the depth of the image rounded to a legal
%  quantum depth: 8, 16, or 32.
%
%  The format of the GetImageQuantumDepth method is:
%
%      size_t GetImageQuantumDepth(const Image *image,
%        const MagickBooleanType constrain)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o constrain: A value other than MagickFalse, constrains the depth to
%      a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  size_t
    depth;

  depth=image->depth;
  if (depth <= 8)
    depth=8;
  else if (depth <= 16)
    depth=16;
  else if (depth <= 32)
    depth=32;
  else if (depth <= 64)
    depth=64;
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageType() returns the potential type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  To ensure the image type matches its potential, use SetImageType():
%
%      (void) SetImageType(image,GetImageType(image));
%
%  The format of the GetImageType method is:
%
%      ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == CMYKColorspace) { if (image->matte == MagickFalse) return(ColorSeparationType); return(ColorSeparationMatteType); } if (IsMonochromeImage(image,exception) != MagickFalse) return(BilevelType); if (IsGrayImage(image,exception) != MagickFalse) { if (image->matte != MagickFalse) return(GrayscaleMatteType); return(GrayscaleType); } if (IsPaletteImage(image,exception) != MagickFalse) { if (image->matte != MagickFalse) return(PaletteMatteType); return(PaletteType); } if (image->matte != MagickFalse) return(TrueColorMatteType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % either 0 or QuantumRange. Otherwise undefined is returned. % % The format of the IdentifyImageGray method is: % % ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ImageType IdentifyImageGray(const Image *image, ExceptionInfo *exception) { CacheView *image_view; ImageType type; register const PixelPacket *p; register ssize_t x; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->type == BilevelType) || (image->type == GrayscaleType) || (image->type == GrayscaleMatteType)) return(image->type); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(UndefinedType); type=BilevelType; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsPixelGray(p) == MagickFalse) { type=UndefinedType; break; } if ((type == BilevelType) && (IsPixelMonochrome(p) == MagickFalse)) type=GrayscaleType; p++; } if (type == UndefinedType) break; } image_view=DestroyCacheView(image_view); if ((type == GrayscaleType) && (image->matte != MagickFalse)) type=GrayscaleMatteType; return(type); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image % have the same red, green, and blue intensities and the intensity is either % 0 or QuantumRange. % % The format of the IdentifyImageMonochrome method is: % % MagickBooleanType IdentifyImageMonochrome(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image, ExceptionInfo *exception) { CacheView *image_view; ImageType type; register ssize_t x; register const PixelPacket *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->type == BilevelType) return(MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(MagickFalse); type=BilevelType; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsPixelMonochrome(p) == MagickFalse) { type=UndefinedType; break; } p++; } if (type == UndefinedType) break; } image_view=DestroyCacheView(image_view); if (type == BilevelType) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageType() returns the potential type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % To ensure the image type matches its potential, use SetImageType(): % % (void) SetImageType(image,IdentifyImageType(image,exception),exception); % % The format of the IdentifyImageType method is: % % ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ImageType IdentifyImageType(const Image *image, ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == CMYKColorspace) { if (image->matte == MagickFalse) return(ColorSeparationType); return(ColorSeparationMatteType); } if (IdentifyImageMonochrome(image,exception) != MagickFalse) return(BilevelType); if (IdentifyImageGray(image,exception) != UndefinedType) { if (image->matte != MagickFalse) return(GrayscaleMatteType); return(GrayscaleType); } if (IdentifyPaletteImage(image,exception) != MagickFalse) { if (image->matte != MagickFalse) return(PaletteMatteType); return(PaletteType); } if (image->matte != MagickFalse) return(TrueColorMatteType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s G r a y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsGrayImage() returns MagickTrue if the type of the image is grayscale or % bi-level. % % The format of the IsGrayImage method is: % % MagickBooleanType IsGrayImage(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType IsGrayImage(const Image *image, ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if ((image->type == BilevelType) || (image->type == GrayscaleType) || (image->type == GrayscaleMatteType)) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s M o n o c h r o m e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsMonochromeImage() returns MagickTrue if type of the image is bi-level. % % The format of the IsMonochromeImage method is: % % MagickBooleanType IsMonochromeImage(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsMonochromeImage(const Image *image, ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type == BilevelType) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s O p a q u e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsOpaqueImage() returns MagickTrue if none of the pixels in the image have % an opacity value other than opaque (0). % % The format of the IsOpaqueImage method is: % % MagickBooleanType IsOpaqueImage(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsOpaqueImage(const Image *image, ExceptionInfo *exception) { CacheView *image_view; register const PixelPacket *p; register ssize_t x; ssize_t y; /* Determine if image is opaque. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->matte == MagickFalse) return(MagickTrue); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(p) != OpaqueOpacity) break; p++; } if (x < (ssize_t) image->columns) break; } image_view=DestroyCacheView(image_view); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C h a n n e l D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageChannelDepth() sets the depth of the image. % % The format of the SetImageChannelDepth method is: % % MagickBooleanType SetImageDepth(Image *image,const size_t depth) % MagickBooleanType SetImageChannelDepth(Image *image, % const ChannelType channel,const size_t depth) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o depth: the image depth. 
% */ MagickExport MagickBooleanType SetImageDepth(Image *image, const size_t depth) { return(SetImageChannelDepth(image,CompositeChannels,depth)); } MagickExport MagickBooleanType SetImageChannelDepth(Image *image, const ChannelType channel,const size_t depth) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; QuantumAny range; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (depth >= MAGICKCORE_QUANTUM_DEPTH) { image->depth=depth; return(MagickTrue); } range=GetQuantumRange(depth); if (image->storage_class == PseudoClass) { register ssize_t i; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) image->colormap[i].red=ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel((MagickRealType) image->colormap[i].red),range),range); if ((channel & GreenChannel) != 0) image->colormap[i].green=ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel((MagickRealType) image->colormap[i].green),range),range); if ((channel & BlueChannel) != 0) image->colormap[i].blue=ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel((MagickRealType) image->colormap[i].blue),range),range); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel((MagickRealType) image->colormap[i].opacity),range), range); } } status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) DisableMSCWarning(4127) if (1UL*QuantumRange <= MaxMap) RestoreMSCWarning { Quantum *depth_map; register ssize_t i; /* Scale pixels to desired (optimized with depth map). */ depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (Quantum *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range), range); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,depth_map[ScaleQuantumToMap(GetPixelRed(q))]); if ((channel & GreenChannel) != 0) SetPixelGreen(q,depth_map[ScaleQuantumToMap(GetPixelGreen(q))]); if ((channel & BlueChannel) != 0) SetPixelBlue(q,depth_map[ScaleQuantumToMap(GetPixelBlue(q))]); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,depth_map[ScaleQuantumToMap(GetPixelOpacity(q))]); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); depth_map=(Quantum *) RelinquishMagickMemory(depth_map); if (status != MagickFalse) image->depth=depth; return(status); } #endif /* Scale pixels to desired depth. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel( (MagickRealType) GetPixelRed(q)),range),range)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel( (MagickRealType) GetPixelGreen(q)),range),range)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel( (MagickRealType) GetPixelBlue(q)),range),range)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel( (MagickRealType) GetPixelOpacity(q)),range),range)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); if (status != MagickFalse) image->depth=depth; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageType() sets the type of image. Choose from these types: % % BilevelType, GrayscaleType, GrayscaleMatteType, PaletteType, % PaletteMatteType, TrueColorType, TrueColorMatteType, % ColorSeparationType, ColorSeparationMatteType, OptimizeType % % The format of the SetImageType method is: % % MagickBooleanType SetImageType(Image *image,const ImageType type) % % A description of each parameter follows: % % o image: the image. % % o type: Image type. 
% */ MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type) { const char *artifact; ImageInfo *image_info; MagickBooleanType status; QuantizeInfo *quantize_info; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); status=MagickTrue; image_info=AcquireImageInfo(); image_info->dither=image->dither; artifact=GetImageArtifact(image,"dither"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"dither",artifact); switch (type) { case BilevelType: { status=TransformImageColorspace(image,GRAYColorspace); (void) NormalizeImage(image); quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=2; quantize_info->colorspace=GRAYColorspace; status=QuantizeImage(quantize_info,image); quantize_info=DestroyQuantizeInfo(quantize_info); image->matte=MagickFalse; break; } case GrayscaleType: { status=TransformImageColorspace(image,GRAYColorspace); image->matte=MagickFalse; break; } case GrayscaleMatteType: { status=TransformImageColorspace(image,GRAYColorspace); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); break; } case PaletteType: { status=TransformImageColorspace(image,sRGBColorspace); if ((image->storage_class == DirectClass) || (image->colors > 256)) { quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=256; status=QuantizeImage(quantize_info,image); quantize_info=DestroyQuantizeInfo(quantize_info); } image->matte=MagickFalse; break; } case PaletteBilevelMatteType: { status=TransformImageColorspace(image,sRGBColorspace); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); (void) BilevelImageChannel(image,AlphaChannel,(double) QuantumRange/2.0); quantize_info=AcquireQuantizeInfo(image_info); status=QuantizeImage(quantize_info,image); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case PaletteMatteType: { status=TransformImageColorspace(image,sRGBColorspace); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); quantize_info=AcquireQuantizeInfo(image_info); quantize_info->colorspace=TransparentColorspace; status=QuantizeImage(quantize_info,image); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case TrueColorType: { status=TransformImageColorspace(image,sRGBColorspace); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass); image->matte=MagickFalse; break; } case TrueColorMatteType: { status=TransformImageColorspace(image,sRGBColorspace); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); break; } case ColorSeparationType: { status=TransformImageColorspace(image,CMYKColorspace); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass); image->matte=MagickFalse; break; } case ColorSeparationMatteType: { status=TransformImageColorspace(image,CMYKColorspace); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); break; } case OptimizeType: case UndefinedType: break; } image_info=DestroyImageInfo(image_info); if (status == MagickFalse) return(MagickFalse); image->type=type; return(MagickTrue); }
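/*
  Usage sketch (not part of MagickCore; compiled out with #if 0).  It shows how
  the depth and type methods defined above are typically combined by a caller:
  round the declared depth to a legal quantum depth, re-quantize the pixels to
  it, then make the stored type match the image's potential type.  The helper
  name NormalizeImageStorage is hypothetical.
*/
#if 0
static MagickBooleanType NormalizeImageStorage(Image *image,
  ExceptionInfo *exception)
{
  size_t
    depth;

  /* constrain=MagickTrue caps the result at MAGICKCORE_QUANTUM_DEPTH */
  depth=GetImageQuantumDepth(image,MagickTrue);
  if (SetImageDepth(image,depth) == MagickFalse)
    return(MagickFalse);
  /* as suggested in the GetImageType() documentation above */
  return(SetImageType(image,GetImageType(image,exception)));
}
#endif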
blake2sp.c
/* BLAKE2 reference source code package - optimized C implementations Written in 2012 by Samuel Neves <[email protected]> To the extent possible under law, the author(s) have dedicated all copyright and related and neighboring rights to this software to the public domain worldwide. This software is distributed without any warranty. You should have received a copy of the CC0 Public Domain Dedication along with this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. */ #include <stdlib.h> #include <string.h> #include <stdio.h> #if defined(_OPENMP) #include <omp.h> #endif #include "blake2.h" #include "blake2-impl.h" #define PARALLELISM_DEGREE 8 static int blake2sp_init_leaf( blake2s_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset ) { blake2s_param P[1]; P->digest_length = outlen; P->key_length = keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; P->leaf_length = 0; store48( P->node_offset, offset ); P->node_depth = 0; P->inner_length = BLAKE2S_OUTBYTES; memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); blake2s_init_param( S, P ); S->outlen = P->inner_length; return 0; } static int blake2sp_init_root( blake2s_state *S, uint8_t outlen, uint8_t keylen ) { blake2s_param P[1]; P->digest_length = outlen; P->key_length = keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; P->leaf_length = 0; store48( P->node_offset, 0ULL ); P->node_depth = 1; P->inner_length = BLAKE2S_OUTBYTES; memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); blake2s_init_param( S, P ); S->outlen = P->digest_length; return 0; } int blake2sp_init( blake2sp_state *S, size_t outlen ) { if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1; memset( S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; if( blake2sp_init_root( S->R, ( uint8_t ) outlen, 0 ) < 0 ) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2sp_init_leaf( S->S[i], ( uint8_t ) outlen, 0, i ) < 0 ) return -1; S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; S->outlen = ( uint8_t ) outlen; return 0; } int blake2sp_init_key( blake2sp_state *S, size_t outlen, const void *key, size_t keylen ) { if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1; if( !key || !keylen || keylen > BLAKE2S_KEYBYTES ) return -1; memset( S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; if( blake2sp_init_root( S->R, ( uint8_t ) outlen, ( uint8_t ) keylen ) < 0 ) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2sp_init_leaf( S->S[i], ( uint8_t ) outlen, ( uint8_t ) keylen, i ) < 0 ) return -1; S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; S->outlen = ( uint8_t ) outlen; { uint8_t block[BLAKE2S_BLOCKBYTES]; memset( block, 0, BLAKE2S_BLOCKBYTES ); memcpy( block, key, keylen ); for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S->S[i], block, BLAKE2S_BLOCKBYTES ); secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */ } return 0; } int blake2sp_update( blake2sp_state *S, const uint8_t *in, size_t inlen ) { size_t left = S->buflen; size_t fill = sizeof( S->buf ) - left; if( left && inlen >= fill ) { memcpy( S->buf + left, in, fill ); for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES ); in += fill; inlen -= fill; left = 0; } #if defined(_OPENMP) omp_set_num_threads(PARALLELISM_DEGREE); #pragma omp parallel shared(S) #else for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ ) #endif { #if defined(_OPENMP) 
size_t id__ = ( size_t ) omp_get_thread_num(); #endif size_t inlen__ = inlen; const uint8_t *in__ = ( const uint8_t * )in; in__ += id__ * BLAKE2S_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES ) { blake2s_update( S->S[id__], in__, BLAKE2S_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; } } in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES ); inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; if( inlen > 0 ) memcpy( S->buf + left, in, inlen ); S->buflen = ( uint32_t ) left + ( uint32_t ) inlen; return 0; } int blake2sp_final( blake2sp_state *S, uint8_t *out, size_t outlen ) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES]; if(S->outlen != outlen) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) { if( S->buflen > i * BLAKE2S_BLOCKBYTES ) { size_t left = S->buflen - i * BLAKE2S_BLOCKBYTES; if( left > BLAKE2S_BLOCKBYTES ) left = BLAKE2S_BLOCKBYTES; blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, left ); } blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES ); } for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES ); blake2s_final( S->R, out, outlen ); return 0; } int blake2sp( uint8_t *out, const void *in, const void *key, size_t outlen, size_t inlen, size_t keylen ) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES]; blake2s_state S[PARALLELISM_DEGREE][1]; blake2s_state FS[1]; /* Verify parameters */ if ( NULL == in && inlen > 0 ) return -1; if ( NULL == out ) return -1; if ( NULL == key && keylen > 0 ) return -1; if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1; if( keylen > BLAKE2S_KEYBYTES ) return -1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2sp_init_leaf( S[i], ( uint8_t ) outlen, ( uint8_t ) keylen, i ) < 0 ) return -1; S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node if( keylen > 0 ) { uint8_t block[BLAKE2S_BLOCKBYTES]; memset( block, 0, BLAKE2S_BLOCKBYTES ); memcpy( block, key, keylen ); for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES ); secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */ } #if defined(_OPENMP) omp_set_num_threads(PARALLELISM_DEGREE); #pragma omp parallel shared(S,hash) #else for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ ) #endif { #if defined(_OPENMP) size_t id__ = ( size_t ) omp_get_thread_num(); #endif size_t inlen__ = inlen; const uint8_t *in__ = ( const uint8_t * )in; in__ += id__ * BLAKE2S_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES ) { blake2s_update( S[id__], in__, BLAKE2S_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; } if( inlen__ > id__ * BLAKE2S_BLOCKBYTES ) { const size_t left = inlen__ - id__ * BLAKE2S_BLOCKBYTES; const size_t len = left <= BLAKE2S_BLOCKBYTES ? left : BLAKE2S_BLOCKBYTES; blake2s_update( S[id__], in__, len ); } blake2s_final( S[id__], hash[id__], BLAKE2S_OUTBYTES ); } if( blake2sp_init_root( FS, ( uint8_t ) outlen, ( uint8_t ) keylen ) < 0 ) return -1; FS->last_node = 1; for( size_t i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES ); return blake2s_final( FS, out, outlen ); }
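/*
  Usage sketch (an addition for illustration, not part of the reference
  package; compiled out with #if 0).  It exercises the one-shot blake2sp()
  entry point defined above and the equivalent init/update/final streaming
  sequence, which must produce the same digest.
*/
#if 0
static int blake2sp_usage_sketch( void )
{
  uint8_t msg[1024];
  uint8_t digest_oneshot[BLAKE2S_OUTBYTES];
  uint8_t digest_stream[BLAKE2S_OUTBYTES];
  blake2sp_state S[1];
  size_t i;

  for( i = 0; i < sizeof( msg ); ++i ) msg[i] = ( uint8_t )i;

  /* one-shot; note the argument order used above: out, in, key, outlen, inlen, keylen */
  if( blake2sp( digest_oneshot, msg, NULL, BLAKE2S_OUTBYTES, sizeof( msg ), 0 ) < 0 )
    return -1;

  /* streaming */
  if( blake2sp_init( S, BLAKE2S_OUTBYTES ) < 0 ) return -1;
  if( blake2sp_update( S, msg, sizeof( msg ) ) < 0 ) return -1;
  if( blake2sp_final( S, digest_stream, BLAKE2S_OUTBYTES ) < 0 ) return -1;

  return 0 == memcmp( digest_oneshot, digest_stream, BLAKE2S_OUTBYTES ) ? 0 : -1;
}
#endif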
Simulation.c
#include "XSbench_header.h"

////////////////////////////////////////////////////////////////////////////////////
// BASELINE FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////
// All "baseline" code is at the top of this file. The baseline code is a simple
// implementation of the algorithm, with only minor CPU optimizations in place.
// Following these functions are a number of optimized variants, each of which
// deploys a different combination of optimization strategies. By default,
// XSBench will only run the baseline implementation. Optimized variants are not
// yet implemented for this OpenMP target offloading port.
////////////////////////////////////////////////////////////////////////////////////

unsigned long long run_event_based_simulation(Inputs in, SimulationData SD, int mype)
{
	if( mype == 0)	printf("Beginning event based simulation...\n");

	////////////////////////////////////////////////////////////////////////////////
	// SUMMARY: Simulation Data Structure Manifest for "SD" Object
	// Here we list all heap arrays (and their lengths) in SD that would need to be
	// offloaded manually if using an accelerator with a separate memory space
	////////////////////////////////////////////////////////////////////////////////
	// int * num_nucs;                     // Length = length_num_nucs
	// double * concs;                     // Length = length_concs
	// int * mats;                         // Length = length_mats
	// double * unionized_energy_array;    // Length = length_unionized_energy_array
	// int * index_grid;                   // Length = length_index_grid
	// NuclideGridPoint * nuclide_grid;    // Length = length_nuclide_grid
	//
	// Note: "unionized_energy_array" and "index_grid" can be of zero length
	//       depending on lookup method.
	//
	// Note: "Lengths" are given as the number of objects in the array, not the
	//       number of bytes.
//////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Begin Actual Simulation Loop //////////////////////////////////////////////////////////////////////////////// unsigned long long verification = 0; #pragma omp target teams distribute parallel for\ map(to: SD.max_num_nucs)\ map(to: SD.num_nucs[:SD.length_num_nucs])\ map(to: SD.concs[:SD.length_concs])\ map(to: SD.mats[:SD.length_mats])\ map(to: SD.unionized_energy_array[:SD.length_unionized_energy_array])\ map(to: SD.index_grid[:SD.length_index_grid])\ map(to: SD.nuclide_grid[:SD.length_nuclide_grid])\ reduction(+:verification) for( int i = 0; i < in.lookups; i++ ) { // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup) seed = fast_forward_LCG(seed, 2*i); // Randomly pick an energy and material for the particle double p_energy = LCG_random_double(&seed); int mat = pick_mat(&seed); // debugging //printf("E = %lf mat = %d\n", p_energy, mat); double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( p_energy, // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation SD.num_nucs, // 1-D array with number of nuclides per material SD.concs, // Flattened 2-D array with concentration of each nuclide in each material SD.unionized_energy_array, // 1-D Unionized energy array SD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level SD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation SD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) SD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we prevent thread // contention by using an OMP reduction on the verification value. // For accelerators, a different approach might be required // (e.g., atomics, reduction of thread-specific values in large // array via CUDA thrust, etc). double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } verification += max_idx+1; } return verification; } // Calculates the microscopic cross section for a given nuclide & energy void calculate_micro_xs( double p_energy, int nuc, long n_isotopes, long n_gridpoints, double * egrid, int * index_data, NuclideGridPoint * nuclide_grids, long idx, double * xs_vector, int grid_type, int hash_bins ){ // Variables double f; NuclideGridPoint * low, * high; // If using only the nuclide grid, we must perform a binary search // to find the energy location in this particular nuclide's grid. 
if( grid_type == NUCLIDE ) { // Perform binary search on the Nuclide Grid to find the index idx = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], 0, n_gridpoints-1); // pull ptr from nuclide grid and check to ensure that // we're not reading off the end of the nuclide's grid if( idx == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + idx - 1]; else low = &nuclide_grids[nuc*n_gridpoints + idx]; } else if( grid_type == UNIONIZED) // Unionized Energy Grid - we already know the index, no binary search needed. { // pull ptr from energy grid and check to ensure that // we're not reading off the end of the nuclide's grid if( index_data[idx * n_isotopes + nuc] == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc] - 1]; else low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc]]; } else // Hash grid { // load lower bounding index int u_low = index_data[idx * n_isotopes + nuc]; // Determine higher bounding index int u_high; if( idx == hash_bins - 1 ) u_high = n_gridpoints - 1; else u_high = index_data[(idx+1)*n_isotopes + nuc] + 1; // Check edge cases to make sure energy is actually between these // Then, if things look good, search for gridpoint in the nuclide grid // within the lower and higher limits we've calculated. double e_low = nuclide_grids[nuc*n_gridpoints + u_low].energy; double e_high = nuclide_grids[nuc*n_gridpoints + u_high].energy; int lower; if( p_energy <= e_low ) lower = 0; else if( p_energy >= e_high ) lower = n_gridpoints - 1; else lower = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], u_low, u_high); if( lower == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + lower - 1]; else low = &nuclide_grids[nuc*n_gridpoints + lower]; } high = low + 1; // calculate the re-useable interpolation factor f = (high->energy - p_energy) / (high->energy - low->energy); // Total XS xs_vector[0] = high->total_xs - f * (high->total_xs - low->total_xs); // Elastic XS xs_vector[1] = high->elastic_xs - f * (high->elastic_xs - low->elastic_xs); // Absorbtion XS xs_vector[2] = high->absorbtion_xs - f * (high->absorbtion_xs - low->absorbtion_xs); // Fission XS xs_vector[3] = high->fission_xs - f * (high->fission_xs - low->fission_xs); // Nu Fission XS xs_vector[4] = high->nu_fission_xs - f * (high->nu_fission_xs - low->nu_fission_xs); //test /* if( omp_get_thread_num() == 0 ) { printf("Lookup: Energy = %lf, nuc = %d\n", p_energy, nuc); printf("e_h = %lf e_l = %lf\n", high->energy , low->energy); printf("xs_h = %lf xs_l = %lf\n", high->elastic_xs, low->elastic_xs); printf("total_xs = %lf\n\n", xs_vector[1]); } */ } // Calculates macroscopic cross section based on a given material & energy void calculate_macro_xs( double p_energy, int mat, long n_isotopes, long n_gridpoints, int * num_nucs, double * concs, double * egrid, int * index_data, NuclideGridPoint * nuclide_grids, int * mats, double * macro_xs_vector, int grid_type, int hash_bins, int max_num_nucs ){ int p_nuc; // the nuclide we are looking up long idx = -1; double conc; // the concentration of the nuclide in the material // cleans out macro_xs_vector for( int k = 0; k < 5; k++ ) macro_xs_vector[k] = 0; // If we are using the unionized energy grid (UEG), we only // need to perform 1 binary search per macroscopic lookup. // If we are using the nuclide grid search, it will have to be // done inside of the "calculate_micro_xs" function for each different // nuclide in the material. 
if( grid_type == UNIONIZED ) idx = grid_search( n_isotopes * n_gridpoints, p_energy, egrid); else if( grid_type == HASH ) { double du = 1.0 / hash_bins; idx = p_energy / du; } // Once we find the pointer array on the UEG, we can pull the data // from the respective nuclide grids, as well as the nuclide // concentration data for the material // Each nuclide from the material needs to have its micro-XS array // looked up & interpolatied (via calculate_micro_xs). Then, the // micro XS is multiplied by the concentration of that nuclide // in the material, and added to the total macro XS array. // (Independent -- though if parallelizing, must use atomic operations // or otherwise control access to the xs_vector and macro_xs_vector to // avoid simulataneous writing to the same data structure) for( int j = 0; j < num_nucs[mat]; j++ ) { double xs_vector[5]; p_nuc = mats[mat*max_num_nucs + j]; conc = concs[mat*max_num_nucs + j]; calculate_micro_xs( p_energy, p_nuc, n_isotopes, n_gridpoints, egrid, index_data, nuclide_grids, idx, xs_vector, grid_type, hash_bins ); for( int k = 0; k < 5; k++ ) macro_xs_vector[k] += xs_vector[k] * conc; } //test /* for( int k = 0; k < 5; k++ ) printf("Energy: %lf, Material: %d, XSVector[%d]: %lf\n", p_energy, mat, k, macro_xs_vector[k]); */ } // binary search for energy on unionized energy grid // returns lower index long grid_search( long n, double quarry, double * A) { long lowerLimit = 0; long upperLimit = n-1; long examinationPoint; long length = upperLimit - lowerLimit; while( length > 1 ) { examinationPoint = lowerLimit + ( length / 2 ); if( A[examinationPoint] > quarry ) upperLimit = examinationPoint; else lowerLimit = examinationPoint; length = upperLimit - lowerLimit; } return lowerLimit; } // binary search for energy on nuclide energy grid long grid_search_nuclide( long n, double quarry, NuclideGridPoint * A, long low, long high) { long lowerLimit = low; long upperLimit = high; long examinationPoint; long length = upperLimit - lowerLimit; while( length > 1 ) { examinationPoint = lowerLimit + ( length / 2 ); if( A[examinationPoint].energy > quarry ) upperLimit = examinationPoint; else lowerLimit = examinationPoint; length = upperLimit - lowerLimit; } return lowerLimit; } // picks a material based on a probabilistic distribution int pick_mat( uint64_t * seed ) { // I have a nice spreadsheet supporting these numbers. They are // the fractions (by volume) of material in the core. Not a // *perfect* approximation of where XS lookups are going to occur, // but this will do a good job of biasing the system nonetheless. // Also could be argued that doing fractions by weight would be // a better approximation, but volume does a good enough job for now. 
double dist[12]; dist[0] = 0.140; // fuel dist[1] = 0.052; // cladding dist[2] = 0.275; // cold, borated water dist[3] = 0.134; // hot, borated water dist[4] = 0.154; // RPV dist[5] = 0.064; // Lower, radial reflector dist[6] = 0.066; // Upper reflector / top plate dist[7] = 0.055; // bottom plate dist[8] = 0.008; // bottom nozzle dist[9] = 0.015; // top nozzle dist[10] = 0.025; // top of fuel assemblies dist[11] = 0.013; // bottom of fuel assemblies double roll = LCG_random_double(seed); // makes a pick based on the distro for( int i = 0; i < 12; i++ ) { double running = 0; for( int j = i; j > 0; j-- ) running += dist[j]; if( roll < running ) return i; } return 0; } double LCG_random_double(uint64_t * seed) { // LCG parameters const uint64_t m = 9223372036854775808ULL; // 2^63 const uint64_t a = 2806196910506780709ULL; const uint64_t c = 1ULL; *seed = (a * (*seed) + c) % m; return (double) (*seed) / (double) m; //return ldexp(*seed, -63); } uint64_t fast_forward_LCG(uint64_t seed, uint64_t n) { // LCG parameters const uint64_t m = 9223372036854775808ULL; // 2^63 uint64_t a = 2806196910506780709ULL; uint64_t c = 1ULL; n = n % m; uint64_t a_new = 1; uint64_t c_new = 0; while(n > 0) { if(n & 1) { a_new *= a; c_new = c_new * a + c; } c *= (a + 1); a *= a; n >>= 1; } return (a_new * seed + c_new) % m; }
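// Consistency sketch (an illustrative addition, not part of XSBench; compiled
// out with #if 0).  It documents the seeding scheme used by the event loop in
// run_event_based_simulation() above: fast_forward_LCG(STARTING_SEED, 2*i)
// must land on the same state that 2*i sequential LCG_random_double() draws
// would reach, so every lookup i gets its own reproducible random stream.
#if 0
static int check_fast_forward_consistency(void)
{
	uint64_t seed_seq = STARTING_SEED;

	for( int i = 0; i < 100; i++ )
	{
		// Independent per-lookup seeding, as in the simulation loop above
		uint64_t seed_ff = fast_forward_LCG(STARTING_SEED, 2*i);

		if( seed_ff != seed_seq )
			return 1; // mismatch would indicate a bug in the skip-ahead

		// Advance the sequential stream by the 2 samples a lookup consumes
		// (one for the particle energy, one for the material pick)
		LCG_random_double(&seed_seq);
		LCG_random_double(&seed_seq);
	}
	return 0;
}
#endif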
convolutiondepthwise_3x3_pack8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw3x3s1_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const __fp16* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); float16x8_t _bias0 = bias ? vld1q_f16(bias + g * 8) : vdupq_n_f16((__fp16)0.f); const __fp16* k0 = kernel.row<const __fp16>(g); __fp16* outptr0 = out.row<__fp16>(0); __fp16* outptr1 = out.row<__fp16>(1); const Mat img0 = bottom_blob.channel(g); const __fp16* r0 = img0.row<const __fp16>(0); const __fp16* r1 = img0.row<const __fp16>(1); const __fp16* r2 = img0.row<const __fp16>(2); const __fp16* r3 = img0.row<const __fp16>(3); float16x8_t _k00 = vld1q_f16(k0); float16x8_t _k01 = vld1q_f16(k0 + 8); float16x8_t _k02 = vld1q_f16(k0 + 16); float16x8_t _k10 = vld1q_f16(k0 + 24); float16x8_t _k11 = vld1q_f16(k0 + 32); float16x8_t _k12 = vld1q_f16(k0 + 40); float16x8_t _k20 = vld1q_f16(k0 + 48); float16x8_t _k21 = vld1q_f16(k0 + 56); float16x8_t _k22 = vld1q_f16(k0 + 64); int i = 0; for (; i + 1 < outh; i += 2) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // r10 r11 r12 r13 "mov v24.16b, %21.16b \n" // sum00 "mov v25.16b, %21.16b \n" // sum01 "mov v26.16b, %21.16b \n" // sum02 "mov v27.16b, %21.16b \n" // sum03 "fmla v24.8h, %15.8h, v12.8h \n" "fmla v25.8h, %15.8h, v13.8h \n" "mov v28.16b, %21.16b \n" // sum10 "mov v29.16b, %21.16b \n" // sum11 "mov v30.16b, %21.16b \n" // sum12 "mov v31.16b, %21.16b \n" // sum13 "fmla v26.8h, %15.8h, v14.8h \n" "fmla v27.8h, %15.8h, v15.8h \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.8h, v17.8h}, [%3] \n" // r14 r15 "fmla v28.8h, %12.8h, v12.8h \n" "fmla v29.8h, %12.8h, v13.8h \n" "fmla v30.8h, %12.8h, v14.8h \n" "fmla v31.8h, %12.8h, v15.8h \n" "fmla v24.8h, %16.8h, v13.8h \n" "fmla v25.8h, %16.8h, v14.8h \n" "fmla v26.8h, %16.8h, v15.8h \n" "fmla v27.8h, %16.8h, v16.8h \n" "fmla v28.8h, %13.8h, v13.8h \n" "fmla v29.8h, %13.8h, v14.8h \n" "fmla v30.8h, %13.8h, v15.8h \n" "fmla v31.8h, %13.8h, v16.8h \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%4], #64 \n" // r20 r21 r22 r23 "fmla v24.8h, %17.8h, v14.8h \n" "fmla v25.8h, %17.8h, v15.8h \n" "fmla v26.8h, %17.8h, v16.8h \n" "fmla v27.8h, %17.8h, v17.8h \n" "fmla v28.8h, %14.8h, v14.8h \n" "fmla v29.8h, %14.8h, v15.8h \n" "fmla v30.8h, %14.8h, v16.8h \n" "fmla v31.8h, %14.8h, v17.8h \n" "fmla v24.8h, %18.8h, v18.8h \n" "fmla v25.8h, %18.8h, v19.8h \n" "fmla v26.8h, %18.8h, v20.8h \n" "fmla v27.8h, %18.8h, v21.8h \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v22.8h, v23.8h}, [%4] \n" // r24 r25 "fmla v28.8h, %15.8h, 
v18.8h \n" "fmla v29.8h, %15.8h, v19.8h \n" "fmla v30.8h, %15.8h, v20.8h \n" "fmla v31.8h, %15.8h, v21.8h \n" "fmla v24.8h, %19.8h, v19.8h \n" "fmla v25.8h, %19.8h, v20.8h \n" "fmla v26.8h, %19.8h, v21.8h \n" "fmla v27.8h, %19.8h, v22.8h \n" "fmla v28.8h, %16.8h, v19.8h \n" "fmla v29.8h, %16.8h, v20.8h \n" "fmla v30.8h, %16.8h, v21.8h \n" "fmla v31.8h, %16.8h, v22.8h \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%2], #64 \n" // r00 r01 r02 r03 "fmla v24.8h, %20.8h, v20.8h \n" "fmla v25.8h, %20.8h, v21.8h \n" "fmla v26.8h, %20.8h, v22.8h \n" "fmla v27.8h, %20.8h, v23.8h \n" "fmla v28.8h, %17.8h, v20.8h \n" "fmla v29.8h, %17.8h, v21.8h \n" "fmla v30.8h, %17.8h, v22.8h \n" "fmla v31.8h, %17.8h, v23.8h \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%5], #64 \n" // r30 r31 r32 r33 "fmla v24.8h, %12.8h, v12.8h \n" "fmla v25.8h, %12.8h, v13.8h \n" "fmla v26.8h, %12.8h, v14.8h \n" "fmla v27.8h, %12.8h, v15.8h \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.8h, v17.8h}, [%2] \n" // r04 r05 "fmla v28.8h, %18.8h, v18.8h \n" "fmla v29.8h, %18.8h, v19.8h \n" "fmla v30.8h, %18.8h, v20.8h \n" "fmla v31.8h, %18.8h, v21.8h \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v22.8h, v23.8h}, [%5] \n" // r34 r35 "fmla v24.8h, %13.8h, v13.8h \n" "fmla v25.8h, %13.8h, v14.8h \n" "fmla v26.8h, %13.8h, v15.8h \n" "fmla v27.8h, %13.8h, v16.8h \n" "fmla v28.8h, %19.8h, v19.8h \n" "fmla v29.8h, %19.8h, v20.8h \n" "fmla v30.8h, %19.8h, v21.8h \n" "fmla v31.8h, %19.8h, v22.8h \n" "fmla v24.8h, %14.8h, v14.8h \n" "fmla v25.8h, %14.8h, v15.8h \n" "fmla v26.8h, %14.8h, v16.8h \n" "fmla v27.8h, %14.8h, v17.8h \n" "fmla v28.8h, %20.8h, v20.8h \n" "fmla v29.8h, %20.8h, v21.8h \n" "fmla v30.8h, %20.8h, v22.8h \n" "fmla v31.8h, %20.8h, v23.8h \n" "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%0], #64 \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k00), // %12 "w"(_k01), // %13 "w"(_k02), // %14 "w"(_k10), // %15 "w"(_k11), // %16 "w"(_k12), // %17 "w"(_k20), // %18 "w"(_k21), // %19 "w"(_k22), // %20 "w"(_bias0) // %21 : "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%3, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%3] \n" // r10 r11 r12 r13 "mov v28.16b, %21.16b \n" // sum00 "mov v29.16b, %21.16b \n" // sum01 "mov v30.16b, %21.16b \n" // sum10 "mov v31.16b, %21.16b \n" // sum11 "fmla v28.8h, %15.8h, v16.8h \n" "fmla v30.8h, %12.8h, v16.8h \n" "fmla v29.8h, %15.8h, v17.8h \n" "fmla v31.8h, %12.8h, v17.8h \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%4] \n" // r20 r21 r22 r23 "fmla v28.8h, %16.8h, v17.8h \n" "fmla v30.8h, %13.8h, v17.8h \n" "fmla v29.8h, %16.8h, v18.8h \n" "fmla v31.8h, %13.8h, v18.8h \n" "fmla v28.8h, %17.8h, v18.8h \n" "fmla v30.8h, %14.8h, v18.8h \n" "fmla v29.8h, %17.8h, v19.8h \n" "fmla v31.8h, %14.8h, v19.8h \n" "fmla v28.8h, %18.8h, v20.8h \n" "fmla v30.8h, %15.8h, v20.8h \n" "fmla v29.8h, %18.8h, v21.8h \n" "fmla v31.8h, %15.8h, v21.8h \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%2] \n" // r00 r01 r02 r03 "fmla v28.8h, %19.8h, v21.8h \n" "fmla v30.8h, %16.8h, v21.8h \n" "fmla v29.8h, %19.8h, v22.8h \n" "fmla v31.8h, %16.8h, v22.8h \n" "prfm 
pldl1keep, [%5, #512] \n" "ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%5] \n" // r30 r31 r32 r33 "fmla v28.8h, %20.8h, v22.8h \n" "fmla v30.8h, %17.8h, v22.8h \n" "fmla v29.8h, %20.8h, v23.8h \n" "fmla v31.8h, %17.8h, v23.8h \n" "fmla v28.8h, %12.8h, v12.8h \n" "fmla v30.8h, %18.8h, v24.8h \n" "fmla v29.8h, %12.8h, v13.8h \n" "fmla v31.8h, %18.8h, v25.8h \n" "fmla v28.8h, %13.8h, v13.8h \n" "fmla v30.8h, %19.8h, v25.8h \n" "fmla v29.8h, %13.8h, v14.8h \n" "fmla v31.8h, %19.8h, v26.8h \n" "fmla v28.8h, %14.8h, v14.8h \n" "fmla v30.8h, %20.8h, v26.8h \n" "fmla v29.8h, %14.8h, v15.8h \n" "fmla v31.8h, %20.8h, v27.8h \n" "add %2, %2, #32 \n" "add %3, %3, #32 \n" "add %4, %4, #32 \n" "add %5, %5, #32 \n" "st1 {v28.8h, v29.8h}, [%0], #32 \n" "st1 {v30.8h, v31.8h}, [%1], #32 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k00), // %12 "w"(_k01), // %13 "w"(_k02), // %14 "w"(_k10), // %15 "w"(_k11), // %16 "w"(_k12), // %17 "w"(_k20), // %18 "w"(_k21), // %19 "w"(_k22), // %20 "w"(_bias0) // %21 : "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%3, #384] \n" "ld1 {v15.8h, v16.8h, v17.8h}, [%3] \n" // r10 r11 r12 "mov v28.16b, %21.16b \n" // sum00 "mov v30.16b, %21.16b \n" // sum10 "fmul v29.8h, %15.8h, v15.8h \n" "fmul v31.8h, %12.8h, v15.8h \n" "prfm pldl1keep, [%4, #384] \n" "ld1 {v18.8h, v19.8h, v20.8h}, [%4] \n" // r20 r21 r22 "fmla v28.8h, %16.8h, v16.8h \n" "fmla v30.8h, %13.8h, v16.8h \n" "fmla v29.8h, %17.8h, v17.8h \n" "fmla v31.8h, %14.8h, v17.8h \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v12.8h, v13.8h, v14.8h}, [%2] \n" // r00 r01 r02 "fmla v28.8h, %18.8h, v18.8h \n" "fmla v30.8h, %15.8h, v18.8h \n" "fmla v29.8h, %19.8h, v19.8h \n" "fmla v31.8h, %16.8h, v19.8h \n" "prfm pldl1keep, [%5, #384] \n" "ld1 {v21.8h, v22.8h, v23.8h}, [%5] \n" // r30 r31 r32 "fmla v28.8h, %20.8h, v20.8h \n" "fmla v30.8h, %17.8h, v20.8h \n" "fmla v29.8h, %12.8h, v12.8h \n" "fmla v31.8h, %18.8h, v21.8h \n" "fmla v28.8h, %13.8h, v13.8h \n" "fmla v30.8h, %19.8h, v22.8h \n" "fmla v29.8h, %14.8h, v14.8h \n" "fmla v31.8h, %20.8h, v23.8h \n" "add %2, %2, #16 \n" "add %3, %3, #16 \n" "fadd v28.8h, v28.8h, v29.8h \n" "fadd v30.8h, v30.8h, v31.8h \n" "add %4, %4, #16 \n" "add %5, %5, #16 \n" "st1 {v28.8h}, [%0], #16 \n" "st1 {v30.8h}, [%1], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k00), // %12 "w"(_k01), // %13 "w"(_k02), // %14 "w"(_k10), // %15 "w"(_k11), // %16 "w"(_k12), // %17 "w"(_k20), // %18 "w"(_k21), // %19 "w"(_k22), // %20 "w"(_bias0) // %21 : "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31"); } r0 += 2 * 8 + w * 8; r1 += 2 * 8 + w * 8; r2 += 2 * 8 + w * 8; r3 += 2 * 8 + w * 8; outptr0 += outw * 8; outptr1 += outw * 8; } for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%1], #64 \n" // r00 r01 r02 r03 "mov v28.16b, %17.16b \n" // sum00 "mov v29.16b, %17.16b \n" // sum01 "mov v30.16b, %17.16b \n" // sum02 "mov v31.16b, %17.16b \n" // sum03 "fmla v28.8h, %8.8h, v12.8h \n" "fmla v29.8h, %8.8h, v13.8h \n" "fmla 
v30.8h, %8.8h, v14.8h \n" "fmla v31.8h, %8.8h, v15.8h \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v16.8h, v17.8h}, [%1] \n" // r04 r05 "fmla v28.8h, %9.8h, v13.8h \n" "fmla v29.8h, %9.8h, v14.8h \n" "fmla v30.8h, %9.8h, v15.8h \n" "fmla v31.8h, %9.8h, v16.8h \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v18.8h, v19.8h, v20.8h, v21.8h}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v28.8h, %10.8h, v14.8h \n" "fmla v29.8h, %10.8h, v15.8h \n" "fmla v30.8h, %10.8h, v16.8h \n" "fmla v31.8h, %10.8h, v17.8h \n" "fmla v28.8h, %11.8h, v18.8h \n" "fmla v29.8h, %11.8h, v19.8h \n" "fmla v30.8h, %11.8h, v20.8h \n" "fmla v31.8h, %11.8h, v21.8h \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v22.8h, v23.8h}, [%2] \n" // r14 r15 "fmla v28.8h, %12.8h, v19.8h \n" "fmla v29.8h, %12.8h, v20.8h \n" "fmla v30.8h, %12.8h, v21.8h \n" "fmla v31.8h, %12.8h, v22.8h \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v28.8h, %13.8h, v20.8h \n" "fmla v29.8h, %13.8h, v21.8h \n" "fmla v30.8h, %13.8h, v22.8h \n" "fmla v31.8h, %13.8h, v23.8h \n" "fmla v28.8h, %14.8h, v12.8h \n" "fmla v29.8h, %14.8h, v13.8h \n" "fmla v30.8h, %14.8h, v14.8h \n" "fmla v31.8h, %14.8h, v15.8h \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.8h, v17.8h}, [%3] \n" // r24 r25 "fmla v28.8h, %15.8h, v13.8h \n" "fmla v29.8h, %15.8h, v14.8h \n" "fmla v30.8h, %15.8h, v15.8h \n" "fmla v31.8h, %15.8h, v16.8h \n" "fmla v28.8h, %16.8h, v14.8h \n" "fmla v29.8h, %16.8h, v15.8h \n" "fmla v30.8h, %16.8h, v16.8h \n" "fmla v31.8h, %16.8h, v17.8h \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%1] \n" // r00 r01 r02 r03 "mov v28.16b, %17.16b \n" // sum00 "mov v29.16b, %17.16b \n" // sum01 "fmul v30.8h, %8.8h, v12.8h \n" "fmul v31.8h, %8.8h, v13.8h \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%2] \n" // r10 r11 r12 r13 "fmla v28.8h, %9.8h, v13.8h \n" "fmla v29.8h, %9.8h, v14.8h \n" "fmla v30.8h, %10.8h, v14.8h \n" "fmla v31.8h, %10.8h, v15.8h \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%3] \n" // r20 r21 r22 r23 "fmla v28.8h, %11.8h, v16.8h \n" "fmla v29.8h, %11.8h, v17.8h \n" "fmla v30.8h, %12.8h, v17.8h \n" "fmla v31.8h, %12.8h, v18.8h \n" "fmla v28.8h, %13.8h, v18.8h \n" "fmla v29.8h, %13.8h, v19.8h \n" "fmla v30.8h, %14.8h, v20.8h \n" "fmla v31.8h, %14.8h, v21.8h \n" "fmla v28.8h, %15.8h, v21.8h \n" "fmla v29.8h, %15.8h, v22.8h \n" "fmla v30.8h, %16.8h, v22.8h \n" "fmla v31.8h, %16.8h, v23.8h \n" "add %1, %1, #32 \n" "fadd v28.8h, v28.8h, v30.8h \n" "fadd v29.8h, v29.8h, v31.8h \n" "add %2, %2, #32 \n" "add %3, %3, #32 \n" "st1 {v28.8h, v29.8h}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", 
"v21", "v22", "v23", "v28", "v29", "v30", "v31"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%1, #384] \n" "ld1 {v12.8h, v13.8h, v14.8h}, [%1] \n" // r00 r01 r02 "mov v28.16b, %17.16b \n" // sum00 "fmul v29.8h, %8.8h, v12.8h \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v15.8h, v16.8h, v17.8h}, [%2] \n" // r10 r11 r12 "fmul v30.8h, %9.8h, v13.8h \n" "fmul v28.8h, %10.8h, v14.8h \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v18.8h, v19.8h, v20.8h}, [%3] \n" // r20 r21 r22 "fmla v29.8h, %11.8h, v15.8h \n" "fmla v30.8h, %12.8h, v16.8h \n" "fmla v28.8h, %13.8h, v17.8h \n" "fmla v29.8h, %14.8h, v18.8h \n" "fmla v30.8h, %15.8h, v19.8h \n" "fmla v28.8h, %16.8h, v20.8h \n" "add %1, %1, #16 \n" "fadd v29.8h, v29.8h, v30.8h \n" "fadd v28.8h, v28.8h, v29.8h \n" "add %2, %2, #16 \n" "add %3, %3, #16 \n" "st1 {v28.8h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v28", "v29", "v30"); } r0 += 2 * 8; r1 += 2 * 8; r2 += 2 * 8; } } } static void convdw3x3s2_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = (w - 2 * outw + w) * 8; const __fp16* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); float16x8_t _bias0 = bias ? vld1q_f16(bias + g * 8) : vdupq_n_f16((__fp16)0.f); const __fp16* k0 = kernel.row<const __fp16>(g); __fp16* outptr0 = out; const Mat img0 = bottom_blob.channel(g); const __fp16* r0 = img0.row<const __fp16>(0); const __fp16* r1 = img0.row<const __fp16>(1); const __fp16* r2 = img0.row<const __fp16>(2); float16x8_t _k00 = vld1q_f16(k0); float16x8_t _k01 = vld1q_f16(k0 + 8); float16x8_t _k02 = vld1q_f16(k0 + 16); float16x8_t _k10 = vld1q_f16(k0 + 24); float16x8_t _k11 = vld1q_f16(k0 + 32); float16x8_t _k12 = vld1q_f16(k0 + 40); float16x8_t _k20 = vld1q_f16(k0 + 48); float16x8_t _k21 = vld1q_f16(k0 + 56); float16x8_t _k22 = vld1q_f16(k0 + 64); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03 "mov v28.16b, %17.16b \n" // sum00 "mov v29.16b, %17.16b \n" // sum01 "mov v30.16b, %17.16b \n" // sum02 "mov v31.16b, %17.16b \n" // sum03 "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%1], #64 \n" // r04 r05 r06 r07 "fmla v28.8h, %8.8h, v0.8h \n" "fmla v29.8h, %8.8h, v2.8h \n" "fmla v30.8h, %8.8h, v4.8h \n" "fmla v31.8h, %8.8h, v6.8h \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v8.8h}, [%1] \n" // r08 "fmla v28.8h, %9.8h, v1.8h \n" "fmla v29.8h, %9.8h, v3.8h \n" "fmla v30.8h, %9.8h, v5.8h \n" "fmla v31.8h, %9.8h, v7.8h \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v28.8h, %10.8h, v2.8h \n" "fmla v29.8h, %10.8h, v4.8h \n" "fmla v30.8h, %10.8h, v6.8h \n" "fmla v31.8h, %10.8h, v8.8h \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%2], #64 \n" // r14 r15 r16 r17 "fmla v28.8h, %11.8h, v16.8h \n" "fmla v29.8h, %11.8h, v18.8h \n" "fmla v30.8h, %11.8h, v20.8h 
\n" "fmla v31.8h, %11.8h, v22.8h \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v24.8h}, [%2] \n" // r18 "fmla v28.8h, %12.8h, v17.8h \n" "fmla v29.8h, %12.8h, v19.8h \n" "fmla v30.8h, %12.8h, v21.8h \n" "fmla v31.8h, %12.8h, v23.8h \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v28.8h, %13.8h, v18.8h \n" "fmla v29.8h, %13.8h, v20.8h \n" "fmla v30.8h, %13.8h, v22.8h \n" "fmla v31.8h, %13.8h, v24.8h \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3], #64 \n" // r24 r25 r26 r27 "fmla v28.8h, %14.8h, v0.8h \n" "fmla v29.8h, %14.8h, v2.8h \n" "fmla v30.8h, %14.8h, v4.8h \n" "fmla v31.8h, %14.8h, v6.8h \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v8.8h}, [%3] \n" // r28 "fmla v28.8h, %15.8h, v1.8h \n" "fmla v29.8h, %15.8h, v3.8h \n" "fmla v30.8h, %15.8h, v5.8h \n" "fmla v31.8h, %15.8h, v7.8h \n" "fmla v28.8h, %16.8h, v2.8h \n" "fmla v29.8h, %16.8h, v4.8h \n" "fmla v30.8h, %16.8h, v6.8h \n" "fmla v31.8h, %16.8h, v8.8h \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v28", "v29", "v30", "v31"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%1], #64 \n" // r00 r01 r02 r03 "mov v28.16b, %17.16b \n" // sum00 "mov v29.16b, %17.16b \n" // sum01 "fmul v30.8h, %8.8h, v12.8h \n" "fmul v31.8h, %8.8h, v14.8h \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v16.8h}, [%1] \n" // r04 "fmla v28.8h, %9.8h, v13.8h \n" "fmla v29.8h, %9.8h, v15.8h \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v17.8h, v18.8h, v19.8h, v20.8h}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v30.8h, %10.8h, v14.8h \n" "fmla v31.8h, %10.8h, v16.8h \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v21.8h}, [%1] \n" // r14 "fmla v28.8h, %11.8h, v17.8h \n" "fmla v29.8h, %11.8h, v19.8h \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v22.8h, v23.8h, v24.8h, v25.8h}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v30.8h, %12.8h, v18.8h \n" "fmla v31.8h, %12.8h, v20.8h \n" "fmla v28.8h, %13.8h, v19.8h \n" "fmla v29.8h, %13.8h, v21.8h \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v26.8h}, [%1] \n" // r24 "fmla v30.8h, %14.8h, v22.8h \n" "fmla v31.8h, %14.8h, v24.8h \n" "fmla v28.8h, %15.8h, v23.8h \n" "fmla v29.8h, %15.8h, v25.8h \n" "fmla v30.8h, %16.8h, v24.8h \n" "fmla v31.8h, %16.8h, v26.8h \n" "fadd v28.8h, v28.8h, v30.8h \n" "fadd v29.8h, v29.8h, v31.8h \n" "st1 {v28.8h, v29.8h}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v28", "v29", "v30", "v31"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%1, #384] \n" "ld1 {v12.8h, v13.8h, v14.8h}, [%1] \n" // r00 r01 r02 "mov v28.16b, %17.16b \n" // sum00 "fmul v29.8h, %8.8h, v12.8h \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v15.8h, v16.8h, v17.8h}, [%2] \n" // r10 r11 r12 "fmul 
v30.8h, %9.8h, v13.8h \n" "fmla v28.8h, %10.8h, v14.8h \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v18.8h, v19.8h, v20.8h}, [%3] \n" // r20 r21 r22 "fmla v29.8h, %11.8h, v15.8h \n" "fmla v30.8h, %12.8h, v16.8h \n" "fmla v28.8h, %13.8h, v17.8h \n" "fmla v29.8h, %14.8h, v18.8h \n" "fmla v30.8h, %15.8h, v19.8h \n" "fmla v28.8h, %16.8h, v20.8h \n" "add %1, %1, #32 \n" "fadd v29.8h, v29.8h, v30.8h \n" "fadd v28.8h, v28.8h, v29.8h \n" "add %2, %2, #32 \n" "add %3, %3, #32 \n" "st1 {v28.8h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v28", "v29", "v30"); } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
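// Reference sketch (illustrative only, compiled out with #if 0): a scalar model
// of what the NEON kernels above compute, to document the pack8 fp16 layout.
// Assumptions: every spatial position stores 8 consecutive __fp16 lanes (one per
// packed channel); input rows are contiguous, w*8 halfwords apart; the 3x3
// kernel is stored as 9 groups of 8 lanes (k00..k22), matching the vld1q_f16
// loads above; no padding.  The sketch accumulates in float for clarity, while
// the asm above uses fp16 fmla throughout, so results can differ in the last bit.
#if 0
static void convdw3x3_pack8_fp16_scalar_ref(const __fp16* bottom, __fp16* top,
                                            const __fp16* k, const __fp16* bias8,
                                            int w, int outw, int outh, int stride)
{
    // processes a single group (one packed block of 8 channels), i.e. one
    // iteration of the "for (int g = 0; g < group; g++)" loops above
    for (int i = 0; i < outh; i++)
    {
        for (int j = 0; j < outw; j++)
        {
            for (int lane = 0; lane < 8; lane++)
            {
                float sum = bias8 ? (float)bias8[lane] : 0.f;

                for (int ky = 0; ky < 3; ky++)
                {
                    for (int kx = 0; kx < 3; kx++)
                    {
                        int iy = i * stride + ky;
                        int ix = j * stride + kx;
                        sum += (float)k[(ky * 3 + kx) * 8 + lane]
                             * (float)bottom[(iy * w + ix) * 8 + lane];
                    }
                }
                top[(i * outw + j) * 8 + lane] = (__fp16)sum;
            }
        }
    }
}
#endif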
GB_binop__bxor_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxor_int16) // A.*B function (eWiseMult): GB (_AemultB_08__bxor_int16) // A.*B function (eWiseMult): GB (_AemultB_02__bxor_int16) // A.*B function (eWiseMult): GB (_AemultB_04__bxor_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_int16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bxor_int16) // C+=b function (dense accum): GB (_Cdense_accumb__bxor_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_int16) // C=scalar+B GB (_bind1st__bxor_int16) // C=scalar+B' GB (_bind1st_tran__bxor_int16) // C=A+scalar GB (_bind2nd__bxor_int16) // C=A'+scalar GB (_bind2nd_tran__bxor_int16) // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij) ^ (bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) ^ (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXOR || GxB_NO_INT16 || GxB_NO_BXOR_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bxor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bxor_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bxor_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bxor_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bxor_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bxor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bxor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bxor_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bxor_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x) ^ (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bxor_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) ^ (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) ^ (aij) ; \ } GrB_Info GB (_bind1st_tran__bxor_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int 
nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) ^ (y) ; \ } GrB_Info GB (_bind2nd_tran__bxor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
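/*
 * A minimal standalone sketch of the bind2nd pattern used above: apply
 * z = aij ^ y across a dense value array, skipping entries whose bitmap flag
 * is zero.  It mirrors GB (_bind2nd__bxor_int16) without the GraphBLAS
 * GBB/GBX macros; the function name bind2nd_bxor_int16_ref is illustrative.
 */
#include <stdint.h>

static void bind2nd_bxor_int16_ref (int16_t *Cx, const int16_t *Ax,
                                    const int8_t *Ab,   /* optional bitmap, may be NULL */
                                    int16_t y, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   /* entry not present in the bitmap */
        Cx [p] = Ax [p] ^ y ;                   /* GB_BINOP: z = x ^ y */
    }
}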
pi-v2.c
/*
 * Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
 * between 0 and 1.
 *
 * parallel version using OpenMP
 */

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>   /* OpenMP */

/* normalize _DEBUG_ to 1 if it was defined on the command line, else 0 */
#ifdef _DEBUG_
#undef _DEBUG_
#define _DEBUG_ 1
#else
#define _DEBUG_ 0
#endif

int main(int argc, char *argv[])
{
    double x, sum = 0.0, pi = 0.0;
#if !_DEBUG_
    double start, end;
#endif
    int i;

    const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";
    if (argc < 2) {
        fprintf(stderr, Usage);
        exit(1);
    }
    int num_steps = atoi(argv[1]);
    double step = 1.0 / (double) num_steps;

#if !_DEBUG_
    start = omp_get_wtime();
#endif

    /* do computation -- using all available threads */
    // WARNING : incorrect code
    #pragma omp parallel private(i, x)
    {
#if _DEBUG_
        int id = omp_get_thread_num();
#endif
        for (i = 0; i < num_steps; ++i) {
            x = (i + 0.5) * step;
            sum += 4.0 / (1.0 + x * x);
#if _DEBUG_
            printf("thread id:%d it:%d\n", id, i);
#endif
        }
    }
    pi = step * sum;

#if !_DEBUG_
    end = omp_get_wtime();
    printf("Wall clock execution time = %.9f seconds\n", end - start);
#endif

    /* print results */
    printf("Value of pi = %12.10f\n", pi);

    return EXIT_SUCCESS;
}
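/*
 * The kernel above is intentionally racy, as the WARNING notes: sum is shared
 * and updated by every thread without synchronization, and each thread runs
 * the full loop rather than a share of it.  A minimal corrected sketch of the
 * same computation, using a worksharing loop with a reduction, is shown below;
 * the helper name compute_pi is illustrative.
 */
#include <omp.h>

static double compute_pi(int num_steps)
{
    double sum = 0.0;
    double step = 1.0 / (double) num_steps;
    #pragma omp parallel for reduction(+:sum)
    for (int i = 0; i < num_steps; ++i) {
        double x = (i + 0.5) * step;        /* midpoint of interval i */
        sum += 4.0 / (1.0 + x * x);         /* private partial sums, combined at the end */
    }
    return step * sum;                      /* area under 4/(1+x^2) on [0,1] = pi */
}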
GB_unaryop__minv_bool_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_bool_uint32 // op(A') function: GB_tran__minv_bool_uint32 // C type: bool // A type: uint32_t // cast: ; // unaryop: cij = true #define GB_ATYPE \ uint32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = true ; // casting #define GB_CASTING(z, x) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_bool_uint32 ( bool *restrict Cx, const uint32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_bool_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
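/*
 * A minimal standalone sketch of the apply-unary-op pattern generated above:
 * cast each input entry, apply the operator, and store the result in a static
 * OpenMP loop.  For MINV with a bool output the operator degenerates to
 * z = true (see GB_OP above), so every entry of Cx becomes true regardless of
 * the input value.  The function name unop_minv_bool_uint32_ref is
 * illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

static void unop_minv_bool_uint32_ref (bool *Cx, const uint32_t *Ax,
                                       int64_t anz, int nthreads)
{
    (void) Ax ;   /* input values are not needed: 1/x over bool is always true */
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        Cx [p] = true ;
    }
}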
glove_cython.c
/* Generated by Cython 0.20.1post0 (Debian 0.20.1+git90-g0e6e38e-1ubuntu2) on Sat May 23 21:00:29 2015 */ #define PY_SSIZE_T_CLEAN #ifndef CYTHON_USE_PYLONG_INTERNALS #ifdef PYLONG_BITS_IN_DIGIT #define CYTHON_USE_PYLONG_INTERNALS 0 #else #include "pyconfig.h" #ifdef PYLONG_BITS_IN_DIGIT #define CYTHON_USE_PYLONG_INTERNALS 1 #else #define CYTHON_USE_PYLONG_INTERNALS 0 #endif #endif #endif #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02040000 #error Cython requires Python 2.4+. #else #define CYTHON_ABI "0_20_1post0" #include <stddef.h> /* For offsetof */ #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if CYTHON_COMPILING_IN_PYPY #define Py_OptimizeFlag 0 #endif #if PY_VERSION_HEX < 0x02050000 typedef int Py_ssize_t; #define PY_SSIZE_T_MAX INT_MAX #define PY_SSIZE_T_MIN INT_MIN #define PY_FORMAT_SIZE_T "" #define CYTHON_FORMAT_SSIZE_T "" #define PyInt_FromSsize_t(z) PyInt_FromLong(z) #define PyInt_AsSsize_t(o) __Pyx_PyInt_As_int(o) #define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \ (PyErr_Format(PyExc_TypeError, \ "expected index value, got %.200s", Py_TYPE(o)->tp_name), \ (PyObject*)0)) #define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \ !PyComplex_Check(o)) #define PyIndex_Check __Pyx_PyIndex_Check #define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message) #define __PYX_BUILD_PY_SSIZE_T "i" #else #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #define __Pyx_PyIndex_Check PyIndex_Check #endif #if PY_VERSION_HEX < 0x02060000 #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #define PyType_Modified(t) typedef struct { void *buf; PyObject *obj; Py_ssize_t len; Py_ssize_t itemsize; int readonly; int ndim; char *format; Py_ssize_t *shape; Py_ssize_t *strides; Py_ssize_t *suboffsets; void *internal; } Py_buffer; #define PyBUF_SIMPLE 0 #define PyBUF_WRITABLE 0x0001 #define PyBUF_FORMAT 0x0004 #define PyBUF_ND 0x0008 #define PyBUF_STRIDES (0x0010 | PyBUF_ND) #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) #define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE) #define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE) typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); typedef void (*releasebufferproc)(PyObject *, Py_buffer *); #endif #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a+k, l, 
s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #if PY_VERSION_HEX < 0x02060000 #define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict") #endif #if PY_MAJOR_VERSION >= 3 #define Py_TPFLAGS_CHECKTYPES 0 #define Py_TPFLAGS_HAVE_INDEX 0 #endif #if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3) #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #if PY_VERSION_HEX < 0x02060000 #define Py_TPFLAGS_HAVE_VERSION_TAG 0 #endif #if PY_VERSION_HEX < 0x02060000 && !defined(Py_TPFLAGS_IS_ABSTRACT) #define Py_TPFLAGS_IS_ABSTRACT 0 #endif #if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE) #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_VERSION_HEX < 0x02060000 #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_FromString PyString_FromString #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_DecodeEscape PyString_DecodeEscape #define PyBytes_AsString PyString_AsString #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define PyBytes_Size PyString_Size #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj) || \ PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #if PY_VERSION_HEX < 0x02060000 #define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type) #define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300) #define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b) #define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value) #define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b) #else #define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \ (likely((obj)->ob_type->tp_as_mapping) ? 
(PySequence_GetSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0))) #define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1))) #define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \ (PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \ (likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \ (PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1))) #endif #if PY_MAJOR_VERSION >= 3 #define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n))) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n))) #else #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n)) #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a)) #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n)) #endif #if PY_VERSION_HEX < 0x02050000 #define __Pyx_NAMESTR(n) ((char *)(n)) #define __Pyx_DOCSTR(n) ((char *)(n)) #else #define __Pyx_NAMESTR(n) (n) #define __Pyx_DOCSTR(n) (n) #endif #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { /* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is a quiet NaN. 
*/ float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #ifdef __cplusplus template<typename T> void __Pyx_call_destructor(T* x) { x->~T(); } #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #define __PYX_HAVE__glove__glove_cython #define __PYX_HAVE_API__glove__glove_cython #include "math.h" #include "pythread.h" #include "string.h" #include "stdlib.h" #include "stdio.h" #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/ #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_fits_Py_ssize_t(v, type, is_signed) ( \ (sizeof(type) < sizeof(Py_ssize_t)) || \ (sizeof(type) > sizeof(Py_ssize_t) && \ likely(v < (type)PY_SSIZE_T_MAX || \ v == (type)PY_SSIZE_T_MAX) && \ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN || \ v == (type)PY_SSIZE_T_MIN))) || \ (sizeof(type) == sizeof(Py_ssize_t) && \ (is_signed || likely(v < (type)PY_SSIZE_T_MAX || \ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromUString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromUString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromUString(s) __Pyx_PyByteArray_FromString((const char*)s) #define 
__Pyx_PyStr_FromUString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromUString(s) __Pyx_PyUnicode_FromString((const char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return u_end - u - 1; } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys = NULL; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; sys = PyImport_ImportModule("sys"); if (sys == NULL) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); if (default_encoding == NULL) goto bad; if (strcmp(PyBytes_AsString(default_encoding), "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { const char* default_encoding_c = PyBytes_AS_STRING(default_encoding); char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (ascii_chars_u == NULL) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (ascii_chars_b == NULL || strncmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } } Py_XDECREF(sys); Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return 0; bad: Py_XDECREF(sys); Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys = NULL; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (sys == NULL) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); if (default_encoding == NULL) goto bad; default_encoding_c = 
PyBytes_AS_STRING(default_encoding); __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(sys); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(sys); Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "glove_cython.pyx", "stringsource", }; struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj *memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; /* for error messages only */ struct __Pyx_StructField_* fields; size_t size; /* sizeof(type) */ size_t arraysize[8]; /* length of array in each dimension */ int ndim; char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject, c_H_ar */ char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; #include <pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 || \ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) && \ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && MSC_VER #include <Windows.h> #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using MSVC atomics" #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef __PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview) \ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview) \ 
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else #define __pyx_add_acquisition_count(memview) \ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview) \ __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; /* "View.MemoryView":99 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":269 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":302 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":922 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":302 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":922 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct 
*__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil) \ if (acquire_gil) { \ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ PyGILState_Release(__pyx_gilstate_save); \ } else { \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil) \ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext() \ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif /* CYTHON_REFNANNY */ #define __Pyx_XDECREF_SET(r, v) do { \ PyObject *tmp = (PyObject *) r; \ r = v; __Pyx_XDECREF(tmp); \ } while (0) #define __Pyx_DECREF_SET(r, v) do { \ PyObject *tmp = (PyObject *) r; \ r = v; __Pyx_DECREF(tmp); \ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif static PyObject *__Pyx_GetBuiltinName(PyObject *name); /*proto*/ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \ const char* function_name); /*proto*/ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define 
__Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /*proto*/ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); /*proto*/ #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/ #include <string.h> static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif #define UNARY_NEG_WOULD_OVERFLOW(x) (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *get_memview(PyObject *__pyx_v_self); /*proto*/ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /*proto*/ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/ static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb); /*proto*/ static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); /*proto*/ #define __Pyx_GetItemInt(o, i, 
type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) : \ (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) : \ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self); /*proto*/ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self); /*proto*/ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self); /*proto*/ static void 
__Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback); /*proto*/ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /*proto*/ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *); static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *); static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *); static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice *mvs, char order, int ndim); static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); static int __Pyx_check_binary_version(void); typedef struct { int code_line; PyCodeObject* code_object; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /*proto*/ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/ /* Module declarations from 'glove.glove_cython' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static CYTHON_INLINE double __pyx_f_5glove_12glove_cython_double_min(double, double); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, 
char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
'U' : 'I', IS_UNSIGNED(int), 0 }; #define __Pyx_MODULE_NAME "glove.glove_cython" int __pyx_module_is_main_glove__glove_cython = 0; /* Implementation of 'glove.glove_cython' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_xrange; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static PyObject *__pyx_pf_5glove_12glove_cython_fit_vectors(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_wordvec, __Pyx_memviewslice __pyx_v_wordvec_sum_gradients, __Pyx_memviewslice __pyx_v_wordbias, __Pyx_memviewslice __pyx_v_wordbias_sum_gradients, __Pyx_memviewslice __pyx_v_row, __Pyx_memviewslice __pyx_v_col, __Pyx_memviewslice __pyx_v_counts, __Pyx_memviewslice __pyx_v_shuffle_indices, double __pyx_v_initial_learning_rate, double __pyx_v_max_count, double __pyx_v_alpha, double __pyx_v_max_loss, CYTHON_UNUSED int __pyx_v_no_threads); /* proto */ static PyObject *__pyx_pf_5glove_12glove_cython_2transform_paragraph(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_wordvec, __Pyx_memviewslice __pyx_v_wordbias, __Pyx_memviewslice __pyx_v_paragraphvec, __Pyx_memviewslice __pyx_v_sum_gradients, __Pyx_memviewslice __pyx_v_row, __Pyx_memviewslice __pyx_v_counts, __Pyx_memviewslice __pyx_v_shuffle_indices, double __pyx_v_initial_learning_rate, double __pyx_v_max_count, double __pyx_v_alpha, int __pyx_v_epochs); /* proto */ static int __pyx_array_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array_getbuffer_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *get_memview_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static int __pyx_MemviewEnum_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static int __pyx_memoryview_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int 
__pyx_memoryview_getbuffer_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_memoryview_transpose_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview__get__base_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview_get_shape_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview_get_strides_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview_get_suboffsets_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview_get_ndim_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview_get_itemsize_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview_get_nbytes_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview_get_size_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static void __pyx_memoryviewslice_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryviewslice__get__base_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static char __pyx_k_O[] = "O"; static char __pyx_k_c[] = "c"; static char __pyx_k_i[] = "i"; static char __pyx_k_j[] = "j"; static char __pyx_k_id[] = "id"; static char __pyx_k_np[] = "np"; static char __pyx_k_sp[] = "sp"; static char __pyx_k__12[] = "*"; static char __pyx_k_col[] = "col"; static char __pyx_k_dim[] = "dim"; static char __pyx_k_obj[] = "obj"; static char __pyx_k_row[] = "row"; static char __pyx_k_base[] = "base"; static char __pyx_k_loss[] = "loss"; static char __pyx_k_main[] = "__main__"; static char __pyx_k_mode[] = "mode"; static char __pyx_k_name[] 
= "name"; static char __pyx_k_ndim[] = "ndim"; static char __pyx_k_pack[] = "pack"; static char __pyx_k_size[] = "size"; static char __pyx_k_step[] = "step"; static char __pyx_k_stop[] = "stop"; static char __pyx_k_test[] = "__test__"; static char __pyx_k_alpha[] = "alpha"; static char __pyx_k_class[] = "__class__"; static char __pyx_k_count[] = "count"; static char __pyx_k_epoch[] = "epoch"; static char __pyx_k_error[] = "error"; static char __pyx_k_flags[] = "flags"; static char __pyx_k_numpy[] = "numpy"; static char __pyx_k_range[] = "range"; static char __pyx_k_shape[] = "shape"; static char __pyx_k_start[] = "start"; static char __pyx_k_counts[] = "counts"; static char __pyx_k_epochs[] = "epochs"; static char __pyx_k_format[] = "format"; static char __pyx_k_import[] = "__import__"; static char __pyx_k_name_2[] = "__name__"; static char __pyx_k_struct[] = "struct"; static char __pyx_k_unpack[] = "unpack"; static char __pyx_k_word_a[] = "word_a"; static char __pyx_k_word_b[] = "word_b"; static char __pyx_k_xrange[] = "xrange"; static char __pyx_k_fortran[] = "fortran"; static char __pyx_k_memview[] = "memview"; static char __pyx_k_wordvec[] = "wordvec"; static char __pyx_k_Ellipsis[] = "Ellipsis"; static char __pyx_k_gradient[] = "gradient"; static char __pyx_k_itemsize[] = "itemsize"; static char __pyx_k_max_loss[] = "max_loss"; static char __pyx_k_wordbias[] = "wordbias"; static char __pyx_k_TypeError[] = "TypeError"; static char __pyx_k_enumerate[] = "enumerate"; static char __pyx_k_max_count[] = "max_count"; static char __pyx_k_IndexError[] = "IndexError"; static char __pyx_k_ValueError[] = "ValueError"; static char __pyx_k_no_threads[] = "no_threads"; static char __pyx_k_prediction[] = "prediction"; static char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static char __pyx_k_MemoryError[] = "MemoryError"; static char __pyx_k_collections[] = "collections"; static char __pyx_k_fit_vectors[] = "fit_vectors"; static char __pyx_k_entry_weight[] = "entry_weight"; static char __pyx_k_paragraphvec[] = "paragraphvec"; static char __pyx_k_scipy_sparse[] = "scipy.sparse"; static char __pyx_k_learning_rate[] = "learning_rate"; static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static char __pyx_k_shuffle_index[] = "shuffle_index"; static char __pyx_k_sum_gradients[] = "sum_gradients"; static char __pyx_k_allocate_buffer[] = "allocate_buffer"; static char __pyx_k_dtype_is_object[] = "dtype_is_object"; static char __pyx_k_shuffle_indices[] = "shuffle_indices"; static char __pyx_k_no_cooccurrences[] = "no_cooccurrences"; static char __pyx_k_pyx_releasebuffer[] = "__pyx_releasebuffer"; static char __pyx_k_glove_glove_cython[] = "glove.glove_cython"; static char __pyx_k_strided_and_direct[] = "<strided and direct>"; static char __pyx_k_transform_paragraph[] = "transform_paragraph"; static char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static char __pyx_k_initial_learning_rate[] = "initial_learning_rate"; static char __pyx_k_wordvec_sum_gradients[] = "wordvec_sum_gradients"; static char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static char __pyx_k_wordbias_sum_gradients[] = "wordbias_sum_gradients"; static char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static char __pyx_k_getbuffer_obj_view_flags[] = 
"getbuffer(obj, view, flags)"; static char __pyx_k_Dimension_d_is_not_direct[] = "Dimension %d is not direct"; static char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static char __pyx_k_Index_out_of_bounds_axis_d[] = "Index out of bounds (axis %d)"; static char __pyx_k_Step_may_not_be_zero_axis_d[] = "Step may not be zero (axis %d)"; static char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static char __pyx_k_home_chandras_Desktop_phrase2ve[] = "/home/chandras/Desktop/phrase2vec/hindi_dataset/orig_glove/glove/glove_cython.pyx"; static char __pyx_k_All_dimensions_preceding_dimensi[] = "All dimensions preceding dimension %d must be indexed and not sliced"; static char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static char __pyx_k_Cannot_transpose_memoryview_with[] = "Cannot transpose memoryview with indirect dimensions"; static char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s__12; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_alpha; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_col; static PyObject *__pyx_n_s_collections; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_count; static PyObject *__pyx_n_s_counts; static PyObject *__pyx_n_s_dim; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_entry_weight; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_epoch; static PyObject *__pyx_n_s_epochs; static PyObject *__pyx_n_s_error; static PyObject 
*__pyx_n_s_fit_vectors; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_glove_glove_cython; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_gradient; static PyObject *__pyx_kp_s_home_chandras_Desktop_phrase2ve; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_initial_learning_rate; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_j; static PyObject *__pyx_n_s_learning_rate; static PyObject *__pyx_n_s_loss; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_max_count; static PyObject *__pyx_n_s_max_loss; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_no_cooccurrences; static PyObject *__pyx_n_s_no_threads; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_paragraphvec; static PyObject *__pyx_n_s_prediction; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_releasebuffer; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_row; static PyObject *__pyx_n_s_scipy_sparse; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_shuffle_index; static PyObject *__pyx_n_s_shuffle_indices; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_sp; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_sum_gradients; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_transform_paragraph; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_word_a; static PyObject *__pyx_n_s_word_b; static PyObject *__pyx_n_s_wordbias; static PyObject *__pyx_n_s_wordbias_sum_gradients; static PyObject *__pyx_n_s_wordvec; static PyObject *__pyx_n_s_wordvec_sum_gradients; static PyObject *__pyx_n_s_xrange; static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__20; static PyObject *__pyx_tuple__21; static PyObject *__pyx_codeobj__14; static PyObject *__pyx_codeobj__16; /* "glove/glove_cython.pyx":10 * * * cdef inline double double_min(double a, double b) nogil: return a if a <= b else b # <<<<<<<<<<<<<< * cdef inline int int_min(int a, int b) nogil: return a if a <= b else b * cdef inline int 
int_max(int a, int b) nogil: return a if a > b else b */ static CYTHON_INLINE double __pyx_f_5glove_12glove_cython_double_min(double __pyx_v_a, double __pyx_v_b) { double __pyx_r; double __pyx_t_1; if (((__pyx_v_a <= __pyx_v_b) != 0)) { __pyx_t_1 = __pyx_v_a; } else { __pyx_t_1 = __pyx_v_b; } __pyx_r = __pyx_t_1; goto __pyx_L0; /* function exit code */ __pyx_L0:; return __pyx_r; } /* "glove/glove_cython.pyx":11 * * cdef inline double double_min(double a, double b) nogil: return a if a <= b else b * cdef inline int int_min(int a, int b) nogil: return a if a <= b else b # <<<<<<<<<<<<<< * cdef inline int int_max(int a, int b) nogil: return a if a > b else b * */ static CYTHON_INLINE int __pyx_f_5glove_12glove_cython_int_min(int __pyx_v_a, int __pyx_v_b) { int __pyx_r; int __pyx_t_1; if (((__pyx_v_a <= __pyx_v_b) != 0)) { __pyx_t_1 = __pyx_v_a; } else { __pyx_t_1 = __pyx_v_b; } __pyx_r = __pyx_t_1; goto __pyx_L0; /* function exit code */ __pyx_L0:; return __pyx_r; } /* "glove/glove_cython.pyx":12 * cdef inline double double_min(double a, double b) nogil: return a if a <= b else b * cdef inline int int_min(int a, int b) nogil: return a if a <= b else b * cdef inline int int_max(int a, int b) nogil: return a if a > b else b # <<<<<<<<<<<<<< * * */ static CYTHON_INLINE int __pyx_f_5glove_12glove_cython_int_max(int __pyx_v_a, int __pyx_v_b) { int __pyx_r; int __pyx_t_1; if (((__pyx_v_a > __pyx_v_b) != 0)) { __pyx_t_1 = __pyx_v_a; } else { __pyx_t_1 = __pyx_v_b; } __pyx_r = __pyx_t_1; goto __pyx_L0; /* function exit code */ __pyx_L0:; return __pyx_r; } /* "glove/glove_cython.pyx":20 * * * def fit_vectors(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[:, ::1] wordvec_sum_gradients, * double[::1] wordbias, */ /* Python wrapper */ static PyObject *__pyx_pw_5glove_12glove_cython_1fit_vectors(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_5glove_12glove_cython_fit_vectors[] = "\n Estimate GloVe word embeddings given the cooccurrence matrix.\n Modifies the word vector and word bias array in-place.\n\n Training is performed via asynchronous stochastic gradient descent,\n using the AdaGrad per-coordinate learning rate.\n "; static PyMethodDef __pyx_mdef_5glove_12glove_cython_1fit_vectors = {__Pyx_NAMESTR("fit_vectors"), (PyCFunction)__pyx_pw_5glove_12glove_cython_1fit_vectors, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5glove_12glove_cython_fit_vectors)}; static PyObject *__pyx_pw_5glove_12glove_cython_1fit_vectors(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_wordvec = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_wordvec_sum_gradients = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_wordbias = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_wordbias_sum_gradients = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_row = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_col = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_counts = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_shuffle_indices = { 0, 0, { 0 }, { 0 }, { 0 } }; double __pyx_v_initial_learning_rate; double __pyx_v_max_count; double __pyx_v_alpha; double __pyx_v_max_loss; CYTHON_UNUSED int __pyx_v_no_threads; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("fit_vectors (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = 
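/* Argument handling for fit_vectors: the wrapper unpacks the thirteen
   Python arguments by position or keyword, converts the array arguments
   into typed memoryview slices (double[:, ::1] for wordvec and
   wordvec_sum_gradients, double[::1] for wordbias, wordbias_sum_gradients
   and counts, int[::1] for row, col and shuffle_indices), and dispatches
   to __pyx_pf_5glove_12glove_cython_fit_vectors.  Per the docstring, that
   implementation fits GloVe word embeddings by asynchronous stochastic
   gradient descent with AdaGrad per-coordinate learning rates, modifying
   wordvec and wordbias in place; see the summary comment ahead of the
   main update loop further down. */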
{&__pyx_n_s_wordvec,&__pyx_n_s_wordvec_sum_gradients,&__pyx_n_s_wordbias,&__pyx_n_s_wordbias_sum_gradients,&__pyx_n_s_row,&__pyx_n_s_col,&__pyx_n_s_counts,&__pyx_n_s_shuffle_indices,&__pyx_n_s_initial_learning_rate,&__pyx_n_s_max_count,&__pyx_n_s_alpha,&__pyx_n_s_max_loss,&__pyx_n_s_no_threads,0}; PyObject* values[13] = {0,0,0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12); case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11); case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wordvec)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wordvec_sum_gradients)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wordbias)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wordbias_sum_gradients)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_row)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_col)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_counts)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shuffle_indices)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_initial_learning_rate)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if 
(likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_max_count)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_alpha)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 11: if (likely((values[11] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_max_loss)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 11); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 12: if (likely((values[12] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_no_threads)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, 12); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "fit_vectors") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 13) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); values[11] = PyTuple_GET_ITEM(__pyx_args, 11); values[12] = PyTuple_GET_ITEM(__pyx_args, 12); } __pyx_v_wordvec = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0]); if (unlikely(!__pyx_v_wordvec.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_wordvec_sum_gradients = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[1]); if (unlikely(!__pyx_v_wordvec_sum_gradients.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 21; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_wordbias = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[2]); if (unlikely(!__pyx_v_wordbias.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_wordbias_sum_gradients = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[3]); if (unlikely(!__pyx_v_wordbias_sum_gradients.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_row = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[4]); if (unlikely(!__pyx_v_row.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_col = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[5]); if (unlikely(!__pyx_v_col.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_counts = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[6]); if (unlikely(!__pyx_v_counts.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_shuffle_indices = 
__Pyx_PyObject_to_MemoryviewSlice_dc_int(values[7]); if (unlikely(!__pyx_v_shuffle_indices.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_initial_learning_rate = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_initial_learning_rate == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_max_count = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_max_count == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 29; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_alpha = __pyx_PyFloat_AsDouble(values[10]); if (unlikely((__pyx_v_alpha == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 30; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_max_loss = __pyx_PyFloat_AsDouble(values[11]); if (unlikely((__pyx_v_max_loss == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_no_threads = __Pyx_PyInt_As_int(values[12]); if (unlikely((__pyx_v_no_threads == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 32; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("fit_vectors", 1, 13, 13, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("glove.glove_cython.fit_vectors", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5glove_12glove_cython_fit_vectors(__pyx_self, __pyx_v_wordvec, __pyx_v_wordvec_sum_gradients, __pyx_v_wordbias, __pyx_v_wordbias_sum_gradients, __pyx_v_row, __pyx_v_col, __pyx_v_counts, __pyx_v_shuffle_indices, __pyx_v_initial_learning_rate, __pyx_v_max_count, __pyx_v_alpha, __pyx_v_max_loss, __pyx_v_no_threads); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5glove_12glove_cython_fit_vectors(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_wordvec, __Pyx_memviewslice __pyx_v_wordvec_sum_gradients, __Pyx_memviewslice __pyx_v_wordbias, __Pyx_memviewslice __pyx_v_wordbias_sum_gradients, __Pyx_memviewslice __pyx_v_row, __Pyx_memviewslice __pyx_v_col, __Pyx_memviewslice __pyx_v_counts, __Pyx_memviewslice __pyx_v_shuffle_indices, double __pyx_v_initial_learning_rate, double __pyx_v_max_count, double __pyx_v_alpha, double __pyx_v_max_loss, CYTHON_UNUSED int __pyx_v_no_threads) { int __pyx_v_dim; CYTHON_UNUSED int __pyx_v_no_cooccurrences; int __pyx_v_word_a; int __pyx_v_word_b; double __pyx_v_count; double __pyx_v_learning_rate; double __pyx_v_gradient; double __pyx_v_prediction; double __pyx_v_entry_weight; double __pyx_v_loss; int __pyx_v_i; int __pyx_v_j; int __pyx_v_shuffle_index; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; int __pyx_t_11; int __pyx_t_12; int __pyx_t_13; int __pyx_t_14; int __pyx_t_15; int __pyx_t_16; int __pyx_t_17; int __pyx_t_18; int __pyx_t_19; int __pyx_t_20; int __pyx_t_21; int __pyx_t_22; int __pyx_t_23; int __pyx_t_24; int __pyx_t_25; int __pyx_t_26; int __pyx_t_27; int __pyx_t_28; int __pyx_t_29; int 
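/* Main update loop of fit_vectors (generated from glove_cython.pyx lines
   59-108).  The cooccurrence entries (row, col, counts) are visited in the
   order given by shuffle_indices, in parallel over no_threads OpenMP
   threads with a "dynamic" schedule.  For each entry the code below
   computes

       prediction   = dot(wordvec[word_a], wordvec[word_b])
                      + wordbias[word_a] + wordbias[word_b]
       entry_weight = min(1, count / max_count) ^ alpha
       loss         = entry_weight * (prediction - log(count))

   clips loss to [-max_loss, max_loss] for numerical stability, and then
   performs AdaGrad updates: each coordinate of wordvec[word_a] and
   wordvec[word_b] is moved against its gradient (loss times the partner
   word's coordinate) with step size
   initial_learning_rate / sqrt(accumulated squared gradient), the word
   biases are moved against the loss itself, and the corresponding
   sum-of-squared-gradient accumulators are incremented. */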
__pyx_t_30; int __pyx_t_31; int __pyx_t_32; int __pyx_t_33; int __pyx_t_34; int __pyx_t_35; int __pyx_t_36; int __pyx_t_37; int __pyx_t_38; int __pyx_t_39; int __pyx_t_40; __Pyx_RefNannySetupContext("fit_vectors", 0); /* "glove/glove_cython.pyx":43 * # Get number of latent dimensions and * # number of cooccurrences. * cdef int dim = wordvec.shape[1] # <<<<<<<<<<<<<< * cdef int no_cooccurrences = row.shape[0] * */ __pyx_v_dim = (__pyx_v_wordvec.shape[1]); /* "glove/glove_cython.pyx":44 * # number of cooccurrences. * cdef int dim = wordvec.shape[1] * cdef int no_cooccurrences = row.shape[0] # <<<<<<<<<<<<<< * * # Hold indices of current words and */ __pyx_v_no_cooccurrences = (__pyx_v_row.shape[0]); /* "glove/glove_cython.pyx":59 * # We iterate over random indices to simulate * # shuffling the cooccurrence matrix. * with nogil: # <<<<<<<<<<<<<< * for j in prange(no_cooccurrences, num_threads=no_threads, * schedule='dynamic'): */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { /* "glove/glove_cython.pyx":60 * # shuffling the cooccurrence matrix. * with nogil: * for j in prange(no_cooccurrences, num_threads=no_threads, # <<<<<<<<<<<<<< * schedule='dynamic'): * shuffle_index = shuffle_indices[j] */ __pyx_t_1 = __pyx_v_no_cooccurrences; if (1 == 0) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_3 = (__pyx_t_1 - 0) / 1; if (__pyx_t_3 > 0) { #ifdef _OPENMP #pragma omp parallel num_threads(__pyx_v_no_threads) private(__pyx_t_31, __pyx_t_37, __pyx_t_10, __pyx_t_23, __pyx_t_6, __pyx_t_36, __pyx_t_16, __pyx_t_13, __pyx_t_24, __pyx_t_5, __pyx_t_30, __pyx_t_35, __pyx_t_25, __pyx_t_40, __pyx_t_19, __pyx_t_18, __pyx_t_12, __pyx_t_26, __pyx_t_20, __pyx_t_4, __pyx_t_34, __pyx_t_39, __pyx_t_28, __pyx_t_9, __pyx_t_21, __pyx_t_29, __pyx_t_15, __pyx_t_14, __pyx_t_33, __pyx_t_27, __pyx_t_22, __pyx_t_32, __pyx_t_38, __pyx_t_8, __pyx_t_17, __pyx_t_11, __pyx_t_7) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for lastprivate(__pyx_v_count) lastprivate(__pyx_v_word_a) lastprivate(__pyx_v_i) lastprivate(__pyx_v_shuffle_index) lastprivate(__pyx_v_prediction) lastprivate(__pyx_v_entry_weight) firstprivate(__pyx_v_j) lastprivate(__pyx_v_j) lastprivate(__pyx_v_learning_rate) lastprivate(__pyx_v_loss) lastprivate(__pyx_v_word_b) lastprivate(__pyx_v_gradient) schedule(dynamic) #endif /* _OPENMP */ for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ { __pyx_v_j = 0 + 1 * __pyx_t_2; /* Initialize private variables to invalid values */ __pyx_v_count = ((double)__PYX_NAN()); __pyx_v_word_a = ((int)0xbad0bad0); __pyx_v_i = ((int)0xbad0bad0); __pyx_v_shuffle_index = ((int)0xbad0bad0); __pyx_v_prediction = ((double)__PYX_NAN()); __pyx_v_entry_weight = ((double)__PYX_NAN()); __pyx_v_learning_rate = ((double)__PYX_NAN()); __pyx_v_loss = ((double)__PYX_NAN()); __pyx_v_word_b = ((int)0xbad0bad0); __pyx_v_gradient = ((double)__PYX_NAN()); /* "glove/glove_cython.pyx":62 * for j in prange(no_cooccurrences, num_threads=no_threads, * schedule='dynamic'): * shuffle_index = shuffle_indices[j] # <<<<<<<<<<<<<< * word_a = row[shuffle_index] * word_b = col[shuffle_index] */ __pyx_t_4 = __pyx_v_j; __pyx_v_shuffle_index = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_shuffle_indices.data) + __pyx_t_4)) ))); /* "glove/glove_cython.pyx":63 * schedule='dynamic'): * shuffle_index = shuffle_indices[j] * word_a = 
row[shuffle_index] # <<<<<<<<<<<<<< * word_b = col[shuffle_index] * count = counts[shuffle_index] */ __pyx_t_5 = __pyx_v_shuffle_index; __pyx_v_word_a = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_row.data) + __pyx_t_5)) ))); /* "glove/glove_cython.pyx":64 * shuffle_index = shuffle_indices[j] * word_a = row[shuffle_index] * word_b = col[shuffle_index] # <<<<<<<<<<<<<< * count = counts[shuffle_index] * */ __pyx_t_6 = __pyx_v_shuffle_index; __pyx_v_word_b = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_col.data) + __pyx_t_6)) ))); /* "glove/glove_cython.pyx":65 * word_a = row[shuffle_index] * word_b = col[shuffle_index] * count = counts[shuffle_index] # <<<<<<<<<<<<<< * * # Get prediction */ __pyx_t_7 = __pyx_v_shuffle_index; __pyx_v_count = (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_counts.data) + __pyx_t_7)) ))); /* "glove/glove_cython.pyx":68 * * # Get prediction * prediction = 0.0 # <<<<<<<<<<<<<< * * for i in range(dim): */ __pyx_v_prediction = 0.0; /* "glove/glove_cython.pyx":70 * prediction = 0.0 * * for i in range(dim): # <<<<<<<<<<<<<< * prediction = prediction + wordvec[word_a, i] * wordvec[word_b, i] * */ __pyx_t_8 = __pyx_v_dim; for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { __pyx_v_i = __pyx_t_9; /* "glove/glove_cython.pyx":71 * * for i in range(dim): * prediction = prediction + wordvec[word_a, i] * wordvec[word_b, i] # <<<<<<<<<<<<<< * * prediction = prediction + wordbias[word_a] + wordbias[word_b] */ __pyx_t_10 = __pyx_v_word_a; __pyx_t_11 = __pyx_v_i; __pyx_t_12 = __pyx_v_word_b; __pyx_t_13 = __pyx_v_i; __pyx_v_prediction = (__pyx_v_prediction + ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_10 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_11)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_12 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_13)) ))))); } /* "glove/glove_cython.pyx":73 * prediction = prediction + wordvec[word_a, i] * wordvec[word_b, i] * * prediction = prediction + wordbias[word_a] + wordbias[word_b] # <<<<<<<<<<<<<< * * # Compute loss and the example weight. */ __pyx_t_8 = __pyx_v_word_a; __pyx_t_9 = __pyx_v_word_b; __pyx_v_prediction = ((__pyx_v_prediction + (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias.data) + __pyx_t_8)) )))) + (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias.data) + __pyx_t_9)) )))); /* "glove/glove_cython.pyx":76 * * # Compute loss and the example weight. * entry_weight = double_min(1.0, (count / max_count)) ** alpha # <<<<<<<<<<<<<< * loss = entry_weight * (prediction - c_log(count)) * */ __pyx_v_entry_weight = pow(__pyx_f_5glove_12glove_cython_double_min(1.0, (__pyx_v_count / __pyx_v_max_count)), __pyx_v_alpha); /* "glove/glove_cython.pyx":77 * # Compute loss and the example weight. * entry_weight = double_min(1.0, (count / max_count)) ** alpha * loss = entry_weight * (prediction - c_log(count)) # <<<<<<<<<<<<<< * * # Clip the loss for numerical stability. */ __pyx_v_loss = (__pyx_v_entry_weight * (__pyx_v_prediction - log(__pyx_v_count))); /* "glove/glove_cython.pyx":80 * * # Clip the loss for numerical stability. * if loss < -max_loss: # <<<<<<<<<<<<<< * loss = -max_loss * elif loss > max_loss: */ __pyx_t_14 = ((__pyx_v_loss < (-__pyx_v_max_loss)) != 0); if (__pyx_t_14) { /* "glove/glove_cython.pyx":81 * # Clip the loss for numerical stability. 
* if loss < -max_loss: * loss = -max_loss # <<<<<<<<<<<<<< * elif loss > max_loss: * loss = max_loss */ __pyx_v_loss = (-__pyx_v_max_loss); goto __pyx_L12; } /* "glove/glove_cython.pyx":82 * if loss < -max_loss: * loss = -max_loss * elif loss > max_loss: # <<<<<<<<<<<<<< * loss = max_loss * */ __pyx_t_14 = ((__pyx_v_loss > __pyx_v_max_loss) != 0); if (__pyx_t_14) { /* "glove/glove_cython.pyx":83 * loss = -max_loss * elif loss > max_loss: * loss = max_loss # <<<<<<<<<<<<<< * * # Update step: apply gradients and reproject */ __pyx_v_loss = __pyx_v_max_loss; goto __pyx_L12; } __pyx_L12:; /* "glove/glove_cython.pyx":87 * # Update step: apply gradients and reproject * # onto the unit sphere. * for i in range(dim): # <<<<<<<<<<<<<< * * learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_a, i]) */ __pyx_t_15 = __pyx_v_dim; for (__pyx_t_16 = 0; __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) { __pyx_v_i = __pyx_t_16; /* "glove/glove_cython.pyx":89 * for i in range(dim): * * learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_a, i]) # <<<<<<<<<<<<<< * gradient = loss * wordvec[word_b, i] * wordvec[word_a, i] = (wordvec[word_a, i] - learning_rate */ __pyx_t_17 = __pyx_v_word_a; __pyx_t_18 = __pyx_v_i; __pyx_v_learning_rate = (__pyx_v_initial_learning_rate / sqrt((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec_sum_gradients.data + __pyx_t_17 * __pyx_v_wordvec_sum_gradients.strides[0]) )) + __pyx_t_18)) ))))); /* "glove/glove_cython.pyx":90 * * learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_a, i]) * gradient = loss * wordvec[word_b, i] # <<<<<<<<<<<<<< * wordvec[word_a, i] = (wordvec[word_a, i] - learning_rate * * gradient) */ __pyx_t_19 = __pyx_v_word_b; __pyx_t_20 = __pyx_v_i; __pyx_v_gradient = (__pyx_v_loss * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_19 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_20)) )))); /* "glove/glove_cython.pyx":91 * learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_a, i]) * gradient = loss * wordvec[word_b, i] * wordvec[word_a, i] = (wordvec[word_a, i] - learning_rate # <<<<<<<<<<<<<< * * gradient) * wordvec_sum_gradients[word_a, i] += gradient ** 2 */ __pyx_t_21 = __pyx_v_word_a; __pyx_t_22 = __pyx_v_i; /* "glove/glove_cython.pyx":92 * gradient = loss * wordvec[word_b, i] * wordvec[word_a, i] = (wordvec[word_a, i] - learning_rate * * gradient) # <<<<<<<<<<<<<< * wordvec_sum_gradients[word_a, i] += gradient ** 2 * */ __pyx_t_23 = __pyx_v_word_a; __pyx_t_24 = __pyx_v_i; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_23 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_24)) )) = ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_21 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_22)) ))) - (__pyx_v_learning_rate * __pyx_v_gradient)); /* "glove/glove_cython.pyx":93 * wordvec[word_a, i] = (wordvec[word_a, i] - learning_rate * * gradient) * wordvec_sum_gradients[word_a, i] += gradient ** 2 # <<<<<<<<<<<<<< * * learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_b, i]) */ __pyx_t_25 = __pyx_v_word_a; __pyx_t_26 = __pyx_v_i; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec_sum_gradients.data + __pyx_t_25 * __pyx_v_wordvec_sum_gradients.strides[0]) )) + __pyx_t_26)) )) += pow(__pyx_v_gradient, 2.0); /* "glove/glove_cython.pyx":95 * wordvec_sum_gradients[word_a, i] += gradient ** 2 * * 
learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_b, i]) # <<<<<<<<<<<<<< * gradient = loss * wordvec[word_a, i] * wordvec[word_b, i] = (wordvec[word_b, i] - learning_rate */ __pyx_t_27 = __pyx_v_word_b; __pyx_t_28 = __pyx_v_i; __pyx_v_learning_rate = (__pyx_v_initial_learning_rate / sqrt((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec_sum_gradients.data + __pyx_t_27 * __pyx_v_wordvec_sum_gradients.strides[0]) )) + __pyx_t_28)) ))))); /* "glove/glove_cython.pyx":96 * * learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_b, i]) * gradient = loss * wordvec[word_a, i] # <<<<<<<<<<<<<< * wordvec[word_b, i] = (wordvec[word_b, i] - learning_rate * * gradient) */ __pyx_t_29 = __pyx_v_word_a; __pyx_t_30 = __pyx_v_i; __pyx_v_gradient = (__pyx_v_loss * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_29 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_30)) )))); /* "glove/glove_cython.pyx":97 * learning_rate = initial_learning_rate / sqrt(wordvec_sum_gradients[word_b, i]) * gradient = loss * wordvec[word_a, i] * wordvec[word_b, i] = (wordvec[word_b, i] - learning_rate # <<<<<<<<<<<<<< * * gradient) * wordvec_sum_gradients[word_b, i] += gradient ** 2 */ __pyx_t_31 = __pyx_v_word_b; __pyx_t_32 = __pyx_v_i; /* "glove/glove_cython.pyx":98 * gradient = loss * wordvec[word_a, i] * wordvec[word_b, i] = (wordvec[word_b, i] - learning_rate * * gradient) # <<<<<<<<<<<<<< * wordvec_sum_gradients[word_b, i] += gradient ** 2 * */ __pyx_t_33 = __pyx_v_word_b; __pyx_t_34 = __pyx_v_i; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_33 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_34)) )) = ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_31 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_32)) ))) - (__pyx_v_learning_rate * __pyx_v_gradient)); /* "glove/glove_cython.pyx":99 * wordvec[word_b, i] = (wordvec[word_b, i] - learning_rate * * gradient) * wordvec_sum_gradients[word_b, i] += gradient ** 2 # <<<<<<<<<<<<<< * * # Update word biases. */ __pyx_t_35 = __pyx_v_word_b; __pyx_t_36 = __pyx_v_i; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec_sum_gradients.data + __pyx_t_35 * __pyx_v_wordvec_sum_gradients.strides[0]) )) + __pyx_t_36)) )) += pow(__pyx_v_gradient, 2.0); } /* "glove/glove_cython.pyx":102 * * # Update word biases. * learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_a]) # <<<<<<<<<<<<<< * wordbias[word_a] -= learning_rate * loss * wordbias_sum_gradients[word_a] += loss ** 2 */ __pyx_t_15 = __pyx_v_word_a; __pyx_v_learning_rate = (__pyx_v_initial_learning_rate / sqrt((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias_sum_gradients.data) + __pyx_t_15)) ))))); /* "glove/glove_cython.pyx":103 * # Update word biases. 
* learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_a]) * wordbias[word_a] -= learning_rate * loss # <<<<<<<<<<<<<< * wordbias_sum_gradients[word_a] += loss ** 2 * */ __pyx_t_16 = __pyx_v_word_a; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias.data) + __pyx_t_16)) )) -= (__pyx_v_learning_rate * __pyx_v_loss); /* "glove/glove_cython.pyx":104 * learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_a]) * wordbias[word_a] -= learning_rate * loss * wordbias_sum_gradients[word_a] += loss ** 2 # <<<<<<<<<<<<<< * * learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_b]) */ __pyx_t_37 = __pyx_v_word_a; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias_sum_gradients.data) + __pyx_t_37)) )) += pow(__pyx_v_loss, 2.0); /* "glove/glove_cython.pyx":106 * wordbias_sum_gradients[word_a] += loss ** 2 * * learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_b]) # <<<<<<<<<<<<<< * wordbias[word_b] -= learning_rate * loss * wordbias_sum_gradients[word_b] += loss ** 2 */ __pyx_t_38 = __pyx_v_word_b; __pyx_v_learning_rate = (__pyx_v_initial_learning_rate / sqrt((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias_sum_gradients.data) + __pyx_t_38)) ))))); /* "glove/glove_cython.pyx":107 * * learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_b]) * wordbias[word_b] -= learning_rate * loss # <<<<<<<<<<<<<< * wordbias_sum_gradients[word_b] += loss ** 2 * */ __pyx_t_39 = __pyx_v_word_b; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias.data) + __pyx_t_39)) )) -= (__pyx_v_learning_rate * __pyx_v_loss); /* "glove/glove_cython.pyx":108 * learning_rate = initial_learning_rate / sqrt(wordbias_sum_gradients[word_b]) * wordbias[word_b] -= learning_rate * loss * wordbias_sum_gradients[word_b] += loss ** 2 # <<<<<<<<<<<<<< * * */ __pyx_t_40 = __pyx_v_word_b; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias_sum_gradients.data) + __pyx_t_40)) )) += pow(__pyx_v_loss, 2.0); } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "glove/glove_cython.pyx":59 * # We iterate over random indices to simulate * # shuffling the cooccurrence matrix. 
* with nogil: # <<<<<<<<<<<<<< * for j in prange(no_cooccurrences, num_threads=no_threads, * schedule='dynamic'): */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "glove/glove_cython.pyx":20 * * * def fit_vectors(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[:, ::1] wordvec_sum_gradients, * double[::1] wordbias, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); __PYX_XDEC_MEMVIEW(&__pyx_v_wordvec, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_wordvec_sum_gradients, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_wordbias, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_wordbias_sum_gradients, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_row, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_col, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_counts, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_shuffle_indices, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "glove/glove_cython.pyx":111 * * * def transform_paragraph(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[::1] wordbias, * double[::1] paragraphvec, */ /* Python wrapper */ static PyObject *__pyx_pw_5glove_12glove_cython_3transform_paragraph(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_5glove_12glove_cython_2transform_paragraph[] = "\n Compute a vector representation of a paragraph. This has\n the effect of making the paragraph vector close to words\n that occur in it. The representation should be more\n similar to words that occur in it multiple times, and \n less close to words that are common in the corpus (have\n large word bias values).\n\n This should be be similar to a tf-idf weighting.\n "; static PyMethodDef __pyx_mdef_5glove_12glove_cython_3transform_paragraph = {__Pyx_NAMESTR("transform_paragraph"), (PyCFunction)__pyx_pw_5glove_12glove_cython_3transform_paragraph, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_5glove_12glove_cython_2transform_paragraph)}; static PyObject *__pyx_pw_5glove_12glove_cython_3transform_paragraph(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_wordvec = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_wordbias = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_paragraphvec = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_sum_gradients = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_row = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_counts = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_shuffle_indices = { 0, 0, { 0 }, { 0 }, { 0 } }; double __pyx_v_initial_learning_rate; double __pyx_v_max_count; double __pyx_v_alpha; int __pyx_v_epochs; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("transform_paragraph (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_wordvec,&__pyx_n_s_wordbias,&__pyx_n_s_paragraphvec,&__pyx_n_s_sum_gradients,&__pyx_n_s_row,&__pyx_n_s_counts,&__pyx_n_s_shuffle_indices,&__pyx_n_s_initial_learning_rate,&__pyx_n_s_max_count,&__pyx_n_s_alpha,&__pyx_n_s_epochs,0}; PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); case 7: values[6] = 
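/* transform_paragraph: computes a vector representation of a single
   paragraph from its word cooccurrence counts (row, counts).  Per the
   docstring, the paragraph vector is pulled toward words that occur in
   the paragraph (more strongly for repeated words) and discounted for
   words with large biases, i.e. words that are common in the corpus,
   giving a tf-idf-like weighting.  Unlike fit_vectors it takes an
   'epochs' argument instead of a thread count and, as the loop headers
   below show, sweeps the shuffled cooccurrences epochs times in plain
   (non-parallel) range loops. */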
PyTuple_GET_ITEM(__pyx_args, 6); case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wordvec)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_wordbias)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_paragraphvec)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (likely((values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_sum_gradients)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 4: if (likely((values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_row)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 5: if (likely((values[5] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_counts)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 5); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 6: if (likely((values[6] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shuffle_indices)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 6); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 7: if (likely((values[7] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_initial_learning_rate)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 7); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 8: if (likely((values[8] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_max_count)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 8); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 9: if (likely((values[9] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_alpha)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 9); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 10: if (likely((values[10] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_epochs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, 10); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "transform_paragraph") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto 
__pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 11) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); values[4] = PyTuple_GET_ITEM(__pyx_args, 4); values[5] = PyTuple_GET_ITEM(__pyx_args, 5); values[6] = PyTuple_GET_ITEM(__pyx_args, 6); values[7] = PyTuple_GET_ITEM(__pyx_args, 7); values[8] = PyTuple_GET_ITEM(__pyx_args, 8); values[9] = PyTuple_GET_ITEM(__pyx_args, 9); values[10] = PyTuple_GET_ITEM(__pyx_args, 10); } __pyx_v_wordvec = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(values[0]); if (unlikely(!__pyx_v_wordvec.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_wordbias = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[1]); if (unlikely(!__pyx_v_wordbias.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 112; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_paragraphvec = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[2]); if (unlikely(!__pyx_v_paragraphvec.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_sum_gradients = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[3]); if (unlikely(!__pyx_v_sum_gradients.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_row = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[4]); if (unlikely(!__pyx_v_row.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 115; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_counts = __Pyx_PyObject_to_MemoryviewSlice_dc_double(values[5]); if (unlikely(!__pyx_v_counts.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_shuffle_indices = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[6]); if (unlikely(!__pyx_v_shuffle_indices.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_initial_learning_rate = __pyx_PyFloat_AsDouble(values[7]); if (unlikely((__pyx_v_initial_learning_rate == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 118; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_max_count = __pyx_PyFloat_AsDouble(values[8]); if (unlikely((__pyx_v_max_count == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_alpha = __pyx_PyFloat_AsDouble(values[9]); if (unlikely((__pyx_v_alpha == (double)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_epochs = __Pyx_PyInt_As_int(values[10]); if (unlikely((__pyx_v_epochs == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("transform_paragraph", 1, 11, 11, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("glove.glove_cython.transform_paragraph", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_5glove_12glove_cython_2transform_paragraph(__pyx_self, 
__pyx_v_wordvec, __pyx_v_wordbias, __pyx_v_paragraphvec, __pyx_v_sum_gradients, __pyx_v_row, __pyx_v_counts, __pyx_v_shuffle_indices, __pyx_v_initial_learning_rate, __pyx_v_max_count, __pyx_v_alpha, __pyx_v_epochs); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_5glove_12glove_cython_2transform_paragraph(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_wordvec, __Pyx_memviewslice __pyx_v_wordbias, __Pyx_memviewslice __pyx_v_paragraphvec, __Pyx_memviewslice __pyx_v_sum_gradients, __Pyx_memviewslice __pyx_v_row, __Pyx_memviewslice __pyx_v_counts, __Pyx_memviewslice __pyx_v_shuffle_indices, double __pyx_v_initial_learning_rate, double __pyx_v_max_count, double __pyx_v_alpha, int __pyx_v_epochs) { int __pyx_v_dim; int __pyx_v_no_cooccurrences; int __pyx_v_word_b; double __pyx_v_count; double __pyx_v_prediction; double __pyx_v_entry_weight; double __pyx_v_loss; double __pyx_v_gradient; CYTHON_UNUSED int __pyx_v_epoch; int __pyx_v_i; int __pyx_v_j; int __pyx_v_shuffle_index; double __pyx_v_learning_rate; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_t_10; int __pyx_t_11; int __pyx_t_12; int __pyx_t_13; int __pyx_t_14; int __pyx_t_15; int __pyx_t_16; int __pyx_t_17; int __pyx_t_18; int __pyx_t_19; __Pyx_RefNannySetupContext("transform_paragraph", 0); /* "glove/glove_cython.pyx":135 * # Get number of latent dimensions and * # number of cooccurrences. * cdef int dim = wordvec.shape[1] # <<<<<<<<<<<<<< * cdef int no_cooccurrences = row.shape[0] * */ __pyx_v_dim = (__pyx_v_wordvec.shape[1]); /* "glove/glove_cython.pyx":136 * # number of cooccurrences. * cdef int dim = wordvec.shape[1] * cdef int no_cooccurrences = row.shape[0] # <<<<<<<<<<<<<< * * # Hold indices of current words and */ __pyx_v_no_cooccurrences = (__pyx_v_row.shape[0]); /* "glove/glove_cython.pyx":154 * # We iterate over random indices to simulate * # shuffling the cooccurrence matrix. * for epoch in range(epochs): # <<<<<<<<<<<<<< * for j in range(no_cooccurrences): * shuffle_index = shuffle_indices[j] */ __pyx_t_1 = __pyx_v_epochs; for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_epoch = __pyx_t_2; /* "glove/glove_cython.pyx":155 * # shuffling the cooccurrence matrix. 
* for epoch in range(epochs): * for j in range(no_cooccurrences): # <<<<<<<<<<<<<< * shuffle_index = shuffle_indices[j] * */ __pyx_t_3 = __pyx_v_no_cooccurrences; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_j = __pyx_t_4; /* "glove/glove_cython.pyx":156 * for epoch in range(epochs): * for j in range(no_cooccurrences): * shuffle_index = shuffle_indices[j] # <<<<<<<<<<<<<< * * word_b = row[shuffle_index] */ __pyx_t_5 = __pyx_v_j; __pyx_v_shuffle_index = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_shuffle_indices.data) + __pyx_t_5)) ))); /* "glove/glove_cython.pyx":158 * shuffle_index = shuffle_indices[j] * * word_b = row[shuffle_index] # <<<<<<<<<<<<<< * count = counts[shuffle_index] * */ __pyx_t_6 = __pyx_v_shuffle_index; __pyx_v_word_b = (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_row.data) + __pyx_t_6)) ))); /* "glove/glove_cython.pyx":159 * * word_b = row[shuffle_index] * count = counts[shuffle_index] # <<<<<<<<<<<<<< * * # Get prediction */ __pyx_t_7 = __pyx_v_shuffle_index; __pyx_v_count = (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_counts.data) + __pyx_t_7)) ))); /* "glove/glove_cython.pyx":162 * * # Get prediction * prediction = 0.0 # <<<<<<<<<<<<<< * for i in range(dim): * prediction = prediction + paragraphvec[i] * wordvec[word_b, i] */ __pyx_v_prediction = 0.0; /* "glove/glove_cython.pyx":163 * # Get prediction * prediction = 0.0 * for i in range(dim): # <<<<<<<<<<<<<< * prediction = prediction + paragraphvec[i] * wordvec[word_b, i] * prediction += wordbias[word_b] */ __pyx_t_8 = __pyx_v_dim; for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { __pyx_v_i = __pyx_t_9; /* "glove/glove_cython.pyx":164 * prediction = 0.0 * for i in range(dim): * prediction = prediction + paragraphvec[i] * wordvec[word_b, i] # <<<<<<<<<<<<<< * prediction += wordbias[word_b] * */ __pyx_t_10 = __pyx_v_i; __pyx_t_11 = __pyx_v_word_b; __pyx_t_12 = __pyx_v_i; __pyx_v_prediction = (__pyx_v_prediction + ((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_paragraphvec.data) + __pyx_t_10)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_11 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_12)) ))))); } /* "glove/glove_cython.pyx":165 * for i in range(dim): * prediction = prediction + paragraphvec[i] * wordvec[word_b, i] * prediction += wordbias[word_b] # <<<<<<<<<<<<<< * * # Compute loss and the example weight. */ __pyx_t_8 = __pyx_v_word_b; __pyx_v_prediction = (__pyx_v_prediction + (*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_wordbias.data) + __pyx_t_8)) )))); /* "glove/glove_cython.pyx":168 * * # Compute loss and the example weight. * entry_weight = double_min(1.0, (count / max_count)) ** alpha # <<<<<<<<<<<<<< * loss = entry_weight * (prediction - c_log(count)) * */ __pyx_v_entry_weight = pow(__pyx_f_5glove_12glove_cython_double_min(1.0, (__pyx_v_count / __pyx_v_max_count)), __pyx_v_alpha); /* "glove/glove_cython.pyx":169 * # Compute loss and the example weight. * entry_weight = double_min(1.0, (count / max_count)) ** alpha * loss = entry_weight * (prediction - c_log(count)) # <<<<<<<<<<<<<< * * # Update step: apply gradients. */ __pyx_v_loss = (__pyx_v_entry_weight * (__pyx_v_prediction - log(__pyx_v_count))); /* "glove/glove_cython.pyx":172 * * # Update step: apply gradients. 
* for i in range(dim): # <<<<<<<<<<<<<< * learning_rate = initial_learning_rate / sqrt(sum_gradients[i]) * gradient = loss * wordvec[word_b, i] */ __pyx_t_9 = __pyx_v_dim; for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_9; __pyx_t_13+=1) { __pyx_v_i = __pyx_t_13; /* "glove/glove_cython.pyx":173 * # Update step: apply gradients. * for i in range(dim): * learning_rate = initial_learning_rate / sqrt(sum_gradients[i]) # <<<<<<<<<<<<<< * gradient = loss * wordvec[word_b, i] * paragraphvec[i] = (paragraphvec[i] - learning_rate */ __pyx_t_14 = __pyx_v_i; __pyx_v_learning_rate = (__pyx_v_initial_learning_rate / sqrt((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_sum_gradients.data) + __pyx_t_14)) ))))); /* "glove/glove_cython.pyx":174 * for i in range(dim): * learning_rate = initial_learning_rate / sqrt(sum_gradients[i]) * gradient = loss * wordvec[word_b, i] # <<<<<<<<<<<<<< * paragraphvec[i] = (paragraphvec[i] - learning_rate * * gradient) */ __pyx_t_15 = __pyx_v_word_b; __pyx_t_16 = __pyx_v_i; __pyx_v_gradient = (__pyx_v_loss * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_wordvec.data + __pyx_t_15 * __pyx_v_wordvec.strides[0]) )) + __pyx_t_16)) )))); /* "glove/glove_cython.pyx":175 * learning_rate = initial_learning_rate / sqrt(sum_gradients[i]) * gradient = loss * wordvec[word_b, i] * paragraphvec[i] = (paragraphvec[i] - learning_rate # <<<<<<<<<<<<<< * * gradient) * sum_gradients[i] += gradient ** 2 */ __pyx_t_17 = __pyx_v_i; /* "glove/glove_cython.pyx":176 * gradient = loss * wordvec[word_b, i] * paragraphvec[i] = (paragraphvec[i] - learning_rate * * gradient) # <<<<<<<<<<<<<< * sum_gradients[i] += gradient ** 2 */ __pyx_t_18 = __pyx_v_i; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_paragraphvec.data) + __pyx_t_18)) )) = ((*((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_paragraphvec.data) + __pyx_t_17)) ))) - (__pyx_v_learning_rate * __pyx_v_gradient)); /* "glove/glove_cython.pyx":177 * paragraphvec[i] = (paragraphvec[i] - learning_rate * * gradient) * sum_gradients[i] += gradient ** 2 # <<<<<<<<<<<<<< */ __pyx_t_19 = __pyx_v_i; *((double *) ( /* dim=0 */ ((char *) (((double *) __pyx_v_sum_gradients.data) + __pyx_t_19)) )) += pow(__pyx_v_gradient, 2.0); } } } /* "glove/glove_cython.pyx":111 * * * def transform_paragraph(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[::1] wordbias, * double[::1] paragraphvec, */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); __PYX_XDEC_MEMVIEW(&__pyx_v_wordvec, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_wordbias, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_paragraphvec, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_sum_gradients, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_row, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_counts, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_shuffle_indices, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":116 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations 
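/* The wrapper that follows unpacks the cython.view.array.__cinit__ arguments
 * (shape, itemsize, format, mode="c", allocate_buffer=True) from the Python
 * args/kwargs before dispatching to the typed implementation. As a rough,
 * hand-written usage sketch (not produced by Cython; the shape and element
 * type are assumed example values, only the parameter names come from the
 * declaration in View.MemoryView), the same constructor is reached from
 * Cython code via:
 *
 *     from cython cimport view
 *     cdef view.array buf = view.array(shape=(10, 2), itemsize=sizeof(double),
 *                                      format="d", mode="c")
 */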
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 117; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { /* "View.MemoryView":117 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if 
(unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 116; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_r = __pyx_array_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":116 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; char *__pyx_t_5; int __pyx_t_6; PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":123 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":124 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":126 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":127 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":129 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":130 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if isinstance(format, 
unicode): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":132 * raise ValueError("itemsize <= 0 for cython.array") * * if isinstance(format, unicode): # <<<<<<<<<<<<<< * format = (<unicode>format).encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyUnicode_Check(__pyx_v_format); __pyx_t_4 = (__pyx_t_2 != 0); if (__pyx_t_4) { /* "View.MemoryView":133 * * if isinstance(format, unicode): * format = (<unicode>format).encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ if (unlikely(__pyx_v_format == Py_None)) { PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%s'", "encode"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_3 = PyUnicode_AsASCIIString(((PyObject*)__pyx_v_format)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); __pyx_t_3 = 0; goto __pyx_L5; } __pyx_L5:; /* "View.MemoryView":134 * if isinstance(format, unicode): * format = (<unicode>format).encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_3 = __pyx_v_format; __Pyx_INCREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":135 * format = (<unicode>format).encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_AsString(__pyx_v_self->_format); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_self->format = __pyx_t_5; /* "View.MemoryView":138 * * * self._shape = <Py_ssize_t *> PyMem_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyMem_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":139 * * self._shape = <Py_ssize_t *> PyMem_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":141 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":142 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * 
*/ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":145 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) */ __pyx_t_6 = 0; __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_7); __pyx_t_1++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_7 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_t_7); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_v_dim = __pyx_t_8; __pyx_v_idx = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":146 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":147 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __pyx_t_9 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9); __Pyx_GIVEREF(__pyx_t_9); __pyx_t_7 = 0; __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9); __Pyx_GIVEREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_Raise(__pyx_t_9, 0, 0, 0); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":148 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":151 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_4) { /* "View.MemoryView":152 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":153 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; goto __pyx_L10; } /* "View.MemoryView":154 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_4) { /* "View.MemoryView":155 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":156 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; goto __pyx_L10; } /*else*/ { /* "View.MemoryView":158 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L10:; /* "View.MemoryView":160 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":163 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # 
<<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":164 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_3 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":165 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":168 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":169 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":170 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":172 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":173 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":174 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[1]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else if (sizeof(Py_ssize_t) == sizeof(long) && unlikely(__pyx_v_itemsize == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[1]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_t_1 = (__pyx_v_self->len / 
__pyx_v_itemsize); for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_1; __pyx_t_8+=1) { __pyx_v_i = __pyx_t_8; /* "View.MemoryView":175 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":176 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } goto __pyx_L13; } __pyx_L13:; goto __pyx_L11; } __pyx_L11:; /* "View.MemoryView":116 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":179 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array_getbuffer_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array_getbuffer_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":180 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":181 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 181; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":182 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); goto __pyx_L3; } /* "View.MemoryView":183 * if self.mode == u"c": * bufmode 
= PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 183; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":184 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); goto __pyx_L3; } __pyx_L3:; /* "View.MemoryView":185 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":186 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":187 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":188 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":189 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":190 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":191 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":192 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":193 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* 
"View.MemoryView":194 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":196 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":197 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; goto __pyx_L5; } /*else*/ { /* "View.MemoryView":199 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":201 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":179 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":205 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":206 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":207 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); goto __pyx_L3; } /* "View.MemoryView":208 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":209 * 
self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":210 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); goto __pyx_L4; } __pyx_L4:; /* "View.MemoryView":212 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyMem_Free(self._shape) * */ free(__pyx_v_self->data); goto __pyx_L3; } __pyx_L3:; /* "View.MemoryView":213 * self._strides, self.ndim, False) * free(self.data) * PyMem_Free(self._shape) # <<<<<<<<<<<<<< * * property memview: */ PyMem_Free(__pyx_v_self->_shape); /* "View.MemoryView":205 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":217 * property memview: * @cname('get_memview') * def __get__(self): # <<<<<<<<<<<<<< * * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE */ /* Python wrapper */ static PyObject *get_memview(PyObject *__pyx_v_self); /*proto*/ static PyObject *get_memview(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = get_memview_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *get_memview_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":219 * def __get__(self): * * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":220 * * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); 
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 220; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":217 * property memview: * @cname('get_memview') * def __get__(self): # <<<<<<<<<<<<<< * * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":223 * * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array_MemoryView_5array_6__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":224 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 224; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":223 * * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":226 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = 
__pyx_array_MemoryView_5array_8__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":227 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 227; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(__pyx_t_2 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 227; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":226 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":229 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array_MemoryView_5array_10__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":230 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":229 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; 
__Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":234 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":238 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":239 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_array_type)), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 239; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":241 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); 
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":242 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":241 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_array_type)), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 241; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":243 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":245 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":234 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":271 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ 
(wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 271; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":272 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":271 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":273 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":274 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":273 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; 
__Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":288 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":290 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":294 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":296 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":297 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); goto __pyx_L3; } __pyx_L3:; /* "View.MemoryView":299 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview') */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":288 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":317 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, 
__pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;} if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;} } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 317; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":318 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":319 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":320 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_1 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)((PyObject *)__pyx_memoryview_type))); if (!(__pyx_t_1 != 0)) { __pyx_t_2 = (__pyx_v_obj != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); } else { __pyx_t_3 = (__pyx_t_1 != 0); } if (__pyx_t_3) { /* "View.MemoryView":321 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if 
<PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 321; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":322 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_3 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_3) { /* "View.MemoryView":323 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":324 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * self.lock = PyThread_allocate_lock() */ Py_INCREF(Py_None); goto __pyx_L4; } __pyx_L4:; goto __pyx_L3; } __pyx_L3:; /* "View.MemoryView":326 * Py_INCREF(Py_None) * * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock == NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":327 * * self.lock = PyThread_allocate_lock() * if self.lock == NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_3 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_3) { /* "View.MemoryView":328 * self.lock = PyThread_allocate_lock() * if self.lock == NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 328; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":330 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = self.view.format == b'O' * else: */ __pyx_t_3 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_3) { /* "View.MemoryView":331 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = self.view.format == b'O' # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 331; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 331; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 331; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_v_self->dtype_is_object = __pyx_t_3; goto __pyx_L6; } /*else*/ { /* "View.MemoryView":333 * self.dtype_is_object = self.view.format == b'O' * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L6:; /* "View.MemoryView":335 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ 
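    /* Descriptive note: align_pointer (defined above) rounds the address of
       acquisition_count[0] up to the next sizeof(__pyx_atomic_int) boundary, so
       the atomic acquisition counting of the underlying buffer always operates
       on an aligned word rather than on an arbitrary offset into the struct. */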
__pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":337 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":317 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":339 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":340 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":341 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * * if self.lock != NULL: */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); goto __pyx_L3; } __pyx_L3:; /* "View.MemoryView":343 * __Pyx_ReleaseBuffer(&self.view) * * if self.lock != NULL: # <<<<<<<<<<<<<< * PyThread_free_lock(self.lock) * */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":344 * * if self.lock != NULL: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); goto __pyx_L4; } __pyx_L4:; /* "View.MemoryView":339 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":346 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; int __pyx_lineno = 0; const char 
*__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":348 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":350 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (PyList_CheckExact(__pyx_v_index) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; } for (;;) { if (!__pyx_t_4 && PyList_CheckExact(__pyx_t_2)) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_4 && PyTuple_CheckExact(__pyx_t_2)) { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[1]; __pyx_lineno = 350; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":351 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 351; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 351; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_itemp = __pyx_t_7; } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":353 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":346 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except 
NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":356 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":357 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":358 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; } /* "View.MemoryView":360 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); #endif 
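    /* Descriptive note: _unellipsify(index, ndim) returned a (have_slices, indices)
       pair; when any slice objects are present a new sliced memoryview is built
       below via memview_slice, otherwise the single element is located with
       get_item_pointer and converted to a Python object. */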
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 360; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":363 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 363; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_2) { /* "View.MemoryView":364 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 364; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /*else*/ { /* "View.MemoryView":366 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":367 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 367; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":356 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":369 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview_MemoryView_10memoryview_6__setitem__(struct 
__pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":370 * * def __setitem__(memoryview self, object index, object value): * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (likely(__pyx_t_1 != Py_None)) { PyObject* sequence = __pyx_t_1; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); #else __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_have_slices = __pyx_t_2; __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":372 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_4) { /* "View.MemoryView":373 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_obj = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":374 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 374; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__pyx_t_4) { /* "View.MemoryView":375 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_1 = PyObject_GetItem(((PyObject 
*)__pyx_v_self), __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 375; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L4; } /*else*/ { /* "View.MemoryView":377 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ __pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 377; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L4:; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":379 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 379; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L3:; /* "View.MemoryView":369 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":381 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":382 * * cdef 
is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, ((PyObject *)__pyx_memoryview_type)); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":383 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":384 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L4_error;} __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":385 * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L4_error;} __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":384 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L4_error;} __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 384; __pyx_clineno = __LINE__; goto __pyx_L4_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L11_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":386 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L6_except_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":387 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; 
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; goto __pyx_L5_exception_handled; } goto __pyx_L6_except_error; __pyx_L6_except_error:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L5_exception_handled:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); __pyx_L11_try_end:; } goto __pyx_L3; } __pyx_L3:; /* "View.MemoryView":389 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":381 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":391 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":395 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":396 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 396; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":397 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
__Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 397; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":395 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 395; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":391 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":399 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[128]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; char const *__pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":401 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":406 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); /* "View.MemoryView":408 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if 
<size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_1) { /* "View.MemoryView":409 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":410 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_1 = ((__pyx_v_tmp == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":411 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 411; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":412 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":414 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":416 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":417 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":418 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); goto __pyx_L8; } /*else*/ { /* "View.MemoryView":420 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 420; __pyx_clineno = __LINE__; goto __pyx_L6_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L8:; /* "View.MemoryView":424 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":425 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 425; __pyx_clineno = __LINE__; goto __pyx_L6_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; goto __pyx_L9; } __pyx_L9:; /* "View.MemoryView":426 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * 
finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":429 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } /*exception exit:*/{ __pyx_L6_error:; __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); } __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":399 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":431 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":432 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 432; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":433 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, 
__pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 433; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":431 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":435 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; size_t __pyx_t_7; int __pyx_t_8; int __pyx_t_9; PyObject *__pyx_t_10 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":438 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 438; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":441 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 441; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":442 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":443 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} 
__Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __Pyx_GOTREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 443; __pyx_clineno = __LINE__; goto __pyx_L3_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __pyx_v_result = __pyx_t_5; __pyx_t_5 = 0; } /*else:*/ { /* "View.MemoryView":447 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ __pyx_t_7 = strlen(__pyx_v_self->view.format); __pyx_t_8 = ((__pyx_t_7 == 1) != 0); if (__pyx_t_8) { /* "View.MemoryView":448 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 448; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}; __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L6_except_return; } /* "View.MemoryView":449 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; goto __pyx_L10_try_end; __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":444 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = PyErr_ExceptionMatches(__pyx_t_5); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_1) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 444; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":445 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;} __Pyx_GOTREF(__pyx_t_10); __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 445; 
__pyx_clineno = __LINE__; goto __pyx_L5_except_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; goto __pyx_L4_exception_handled; } goto __pyx_L5_except_error; __pyx_L5_except_error:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; __pyx_L4_exception_handled:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); __pyx_L10_try_end:; } /* "View.MemoryView":435 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":451 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; Py_ssize_t __pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; char *__pyx_t_10; char *__pyx_t_11; char *__pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":454 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 454; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":459 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":460 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 
= __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 460; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":462 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_6); __Pyx_INCREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_1, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_6))||((__pyx_t_6) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_6)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_bytesvalue = ((PyObject*)__pyx_t_6); __pyx_t_6 = 0; } __pyx_L3:; /* "View.MemoryView":464 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_7 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 464; 
__pyx_clineno = __LINE__; goto __pyx_L1_error;} } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_8 = __pyx_v_bytesvalue; __pyx_t_10 = PyBytes_AS_STRING(__pyx_t_8); __pyx_t_11 = (__pyx_t_10 + PyBytes_GET_SIZE(__pyx_t_8)); for (__pyx_t_12 = __pyx_t_10; __pyx_t_12 < __pyx_t_11; __pyx_t_12++) { __pyx_t_9 = __pyx_t_12; __pyx_v_c = (__pyx_t_9[0]); /* "View.MemoryView":465 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_7; /* "View.MemoryView":464 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_7 = (__pyx_t_7 + 1); /* "View.MemoryView":465 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; /* "View.MemoryView":451 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":468 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview_getbuffer_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview_getbuffer_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; Py_ssize_t *__pyx_t_2; char *__pyx_t_3; void *__pyx_t_4; int __pyx_t_5; Py_ssize_t __pyx_t_6; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":469 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":470 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_2 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_2; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":472 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if 
flags & PyBUF_STRIDES: */ __pyx_v_info->shape = NULL; } __pyx_L3:; /* "View.MemoryView":474 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":475 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_2 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_2; goto __pyx_L4; } /*else*/ { /* "View.MemoryView":477 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ __pyx_v_info->strides = NULL; } __pyx_L4:; /* "View.MemoryView":479 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":480 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_2 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_2; goto __pyx_L5; } /*else*/ { /* "View.MemoryView":482 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->suboffsets = NULL; } __pyx_L5:; /* "View.MemoryView":484 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":485 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_3 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_3; goto __pyx_L6; } /*else*/ { /* "View.MemoryView":487 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ __pyx_v_info->format = NULL; } __pyx_L6:; /* "View.MemoryView":489 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_4 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":490 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_5 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_5; /* "View.MemoryView":491 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = 0 */ __pyx_t_6 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_6; /* "View.MemoryView":492 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = 0 * info.obj = self */ __pyx_t_6 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_6; /* "View.MemoryView":493 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = 0 # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_v_info->readonly = 0; /* "View.MemoryView":494 * info.len = self.view.len * info.readonly = 0 * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* 
"View.MemoryView":468 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* function exit code */ __pyx_r = 0; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":501 * property T: * @cname('__pyx_memoryview_transpose') * def __get__(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_memoryview_transpose_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview_transpose_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":502 * @cname('__pyx_memoryview_transpose') * def __get__(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 502; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":503 * def __get__(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 503; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":504 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * property base: */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":501 * property T: * @cname('__pyx_memoryview_transpose') * def __get__(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":508 * property base: * @cname('__pyx_memoryview__get__base') * def __get__(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper 
*/ static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_memoryview__get__base_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview__get__base_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":509 * @cname('__pyx_memoryview__get__base') * def __get__(self): * return self.obj # <<<<<<<<<<<<<< * * property shape: */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":508 * property base: * @cname('__pyx_memoryview__get__base') * def __get__(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":513 * property shape: * @cname('__pyx_memoryview_get_shape') * def __get__(self): # <<<<<<<<<<<<<< * return tuple([self.view.shape[i] for i in xrange(self.view.ndim)]) * */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_memoryview_get_shape_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview_get_shape_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":514 * @cname('__pyx_memoryview_get_shape') * def __get__(self): * return tuple([self.view.shape[i] for i in xrange(self.view.ndim)]) # <<<<<<<<<<<<<< * * property strides: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __pyx_v_self->view.ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; __pyx_t_4 = PyInt_FromSsize_t((__pyx_v_self->view.shape[__pyx_v_i])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_4))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __pyx_t_4 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; /* "View.MemoryView":513 * property shape: * 
@cname('__pyx_memoryview_get_shape') * def __get__(self): # <<<<<<<<<<<<<< * return tuple([self.view.shape[i] for i in xrange(self.view.ndim)]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":518 * property strides: * @cname('__pyx_memoryview_get_strides') * def __get__(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_memoryview_get_strides_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview_get_strides_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":519 * @cname('__pyx_memoryview_get_strides') * def __get__(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":521 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([self.view.strides[i] for i in xrange(self.view.ndim)]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":523 * raise ValueError("Buffer view does not expose strides") * * return tuple([self.view.strides[i] for i in xrange(self.view.ndim)]) # <<<<<<<<<<<<<< * * property suboffsets: */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __pyx_v_self->view.ndim; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; __pyx_t_5 = PyInt_FromSsize_t((__pyx_v_self->view.strides[__pyx_v_i])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); 
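/* At this point the list built from self.view.strides[0..ndim-1] has been
 * converted to a tuple (__pyx_t_5); the remaining statements release the
 * temporary list and return that tuple.  The NULL-strides case above
 * raises ValueError("Buffer view does not expose strides") instead. */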
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":518 * property strides: * @cname('__pyx_memoryview_get_strides') * def __get__(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":527 * property suboffsets: * @cname('__pyx_memoryview_get_suboffsets') * def __get__(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return [-1] * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_memoryview_get_suboffsets_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview_get_suboffsets_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":528 * @cname('__pyx_memoryview_get_suboffsets') * def __get__(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return [-1] * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":529 * def __get__(self): * if self.view.suboffsets == NULL: * return [-1] * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([self.view.suboffsets[i] for i in xrange(self.view.ndim)]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(1 * ((__pyx_v_self->view.ndim<0) ? 
0:__pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_self->view.ndim; __pyx_temp++) { __Pyx_INCREF(__pyx_int_neg_1); PyList_SET_ITEM(__pyx_t_2, __pyx_temp, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); } } __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":531 * return [-1] * self.view.ndim * * return tuple([self.view.suboffsets[i] for i in xrange(self.view.ndim)]) # <<<<<<<<<<<<<< * * property ndim: */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __pyx_v_self->view.ndim; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; __pyx_t_5 = PyInt_FromSsize_t((__pyx_v_self->view.suboffsets[__pyx_v_i])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":527 * property suboffsets: * @cname('__pyx_memoryview_get_suboffsets') * def __get__(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return [-1] * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":535 * property ndim: * @cname('__pyx_memoryview_get_ndim') * def __get__(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_memoryview_get_ndim_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview_get_ndim_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":536 * @cname('__pyx_memoryview_get_ndim') * def __get__(self): * return self.view.ndim # <<<<<<<<<<<<<< * * property itemsize: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 536; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; 
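/* The ndim property getter above simply boxes the C int self.view.ndim
 * into a Python integer.  The suboffsets getter earlier in this block
 * returns [-1] * self.view.ndim when the buffer carries no suboffsets,
 * -1 being the buffer-protocol marker for "no suboffset in this
 * dimension", and a tuple of the per-dimension suboffsets otherwise. */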
/* "View.MemoryView":535 * property ndim: * @cname('__pyx_memoryview_get_ndim') * def __get__(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":540 * property itemsize: * @cname('__pyx_memoryview_get_itemsize') * def __get__(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_memoryview_get_itemsize_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview_get_itemsize_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":541 * @cname('__pyx_memoryview_get_itemsize') * def __get__(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * property nbytes: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 541; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":540 * property itemsize: * @cname('__pyx_memoryview_get_itemsize') * def __get__(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":545 * property nbytes: * @cname('__pyx_memoryview_get_nbytes') * def __get__(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_memoryview_get_nbytes_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview_get_nbytes_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":546 * @cname('__pyx_memoryview_get_nbytes') * def __get__(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * property size: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject 
*)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 546; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 546; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 546; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":545 * property nbytes: * @cname('__pyx_memoryview_get_nbytes') * def __get__(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":550 * property size: * @cname('__pyx_memoryview_get_size') * def __get__(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_memoryview_get_size_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview_get_size_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":551 * @cname('__pyx_memoryview_get_size') * def __get__(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":552 * def __get__(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.shape: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":554 * result = 1 * * for length in self.shape: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (PyList_CheckExact(__pyx_t_3) || PyTuple_CheckExact(__pyx_t_3)) { __pyx_t_4 = __pyx_t_3; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; } 
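/* The loop below is the expansion of "for length in self.shape:
 * result *= length".  It takes a fast path when self.shape is a list or
 * tuple (direct PyList_GET_ITEM / PyTuple_GET_ITEM indexing) and falls
 * back to the generic tp_iternext protocol otherwise, treating
 * StopIteration as normal loop termination.  The resulting product is
 * cached in self._size so later accesses skip the recomputation. */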
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; for (;;) { if (!__pyx_t_6 && PyList_CheckExact(__pyx_t_4)) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_3); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_6 && PyTuple_CheckExact(__pyx_t_4)) { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_3); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_3 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_3 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_3)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[1]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_3); } __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":555 * * for length in self.shape: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 555; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_3); __pyx_t_3 = 0; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":557 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; goto __pyx_L3; } __pyx_L3:; /* "View.MemoryView":559 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":550 * property size: * @cname('__pyx_memoryview_get_size') * def __get__(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":561 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = 
__pyx_memoryview_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":562 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":563 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; } /* "View.MemoryView":565 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":561 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":567 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":568 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":569 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); 
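/* __repr__ formats "<MemoryView of %r at 0x%x>" %
 * (self.base.__class__.__name__, id(self)).  The call below evaluates
 * id(self); both values are then packed into a 2-tuple and applied to the
 * format string with the % operator. */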
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_id, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":568 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 568; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":567 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":571 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":572 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 572; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 572; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":571 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":575 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":578 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice, 'C', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":579 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice, 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig(__pyx_v_mslice, 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 579; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":575 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":581 * return slice_is_contig(mslice, 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ 
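/* is_c_contig() above and is_f_contig() below share the same shape:
 * get_slice_from_memview() fills a __Pyx_memviewslice for this view, and
 * __pyx_memviewslice_is_contig(mslice, 'C' or 'F', ndim) reports whether
 * that slice is C- or Fortran-contiguous.  Roughly (a sketch, not the
 * generated check itself): C-contiguity requires
 * stride[ndim-1] == itemsize and stride[i] == stride[i+1] * shape[i+1]
 * for the remaining dimensions; the Fortran variant runs the same
 * recurrence starting from dimension 0. */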
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":584 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice, 'F', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":585 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice, 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig(__pyx_v_mslice, 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 585; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":581 * return slice_is_contig(mslice, 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":587 * return slice_is_contig(mslice, 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":589 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":591 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * 
mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":592 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), __pyx_k_c, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 592; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":597 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 597; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":587 * return slice_is_contig(mslice, 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":599 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":601 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":603 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":604 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = 
__pyx_memoryview_copy_new_contig((&__pyx_v_src), __pyx_k_fortran, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 604; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":609 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 609; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":599 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":613 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":614 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 614; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":615 * cdef memoryview_cwrapper(object 
o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":616 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":613 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":619 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":620 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, ((PyObject *)__pyx_memoryview_type)); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":619 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":622 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; int __pyx_t_10; int __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":627 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":628 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 628; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":630 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":632 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 632; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":633 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":634 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":635 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (PyList_CheckExact(__pyx_v_tup) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; } for (;;) { if (!__pyx_t_6 && PyList_CheckExact(__pyx_t_4)) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_6 && PyTuple_CheckExact(__pyx_t_4)) { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else 
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = PyNumber_Add(__pyx_t_3, __pyx_int_1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 635; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":636 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":637 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":638 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)(&PySlice_Type))), __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_9 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_t_7); PyList_SET_ITEM(__pyx_t_9, __pyx_temp, __pyx_t_7); __Pyx_GIVEREF(__pyx_t_7); } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_10 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_9); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":639 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; goto __pyx_L7; } /*else*/ { /* "View.MemoryView":641 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ __pyx_t_9 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)(&PySlice_Type))), __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 641; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = __Pyx_PyList_Append(__pyx_v_result, __pyx_t_9); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 641; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __pyx_L7:; /* "View.MemoryView":642 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; goto __pyx_L6; } /*else*/ { /* 
"View.MemoryView":644 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ __pyx_t_1 = PySlice_Check(__pyx_v_item); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { __pyx_t_1 = ((!(__Pyx_PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_11 = __pyx_t_1; } else { __pyx_t_11 = __pyx_t_2; } if (__pyx_t_11) { /* "View.MemoryView":645 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_9); __Pyx_GIVEREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_7, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_9, 0, 0, 0); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 645; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":647 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ if (!__pyx_v_have_slices) { __pyx_t_11 = PySlice_Check(__pyx_v_item); __pyx_t_2 = __pyx_t_11; } else { __pyx_t_2 = __pyx_v_have_slices; } __pyx_v_have_slices = __pyx_t_2; /* "View.MemoryView":648 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_10 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 648; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L6:; } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":650 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 650; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":651 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_2 = (__pyx_v_nslices != 0); if (__pyx_t_2) { /* "View.MemoryView":652 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)(&PySlice_Type))), __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_t_3); PyList_SET_ITEM(__pyx_t_4, __pyx_temp, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); } } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_10 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_4); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; goto __pyx_L9; } __pyx_L9:; /* "View.MemoryView":654 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!__pyx_t_2) { __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = __pyx_t_3; __pyx_t_3 = 0; } else { __pyx_t_9 = __pyx_t_4; __pyx_t_4 = 0; } __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 654; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_9); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_9 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":622 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":656 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * cdef int i * for i in range(ndim): */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":658 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * cdef 
int i * for i in range(ndim): # <<<<<<<<<<<<<< * if suboffsets[i] >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_1 = __pyx_v_ndim; for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":659 * cdef int i * for i in range(ndim): * if suboffsets[i] >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_3 = (((__pyx_v_suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_3) { /* "View.MemoryView":660 * for i in range(ndim): * if suboffsets[i] >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } /* "View.MemoryView":656 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * cdef int i * for i in range(ndim): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":667 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; PyObject *__pyx_t_12 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":668 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":675 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))); /* "View.MemoryView":679 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS 
if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 679; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif /* "View.MemoryView":681 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type)); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":682 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 682; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":683 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); goto __pyx_L3; } /*else*/ { /* "View.MemoryView":685 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":686 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":692 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":693 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":698 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":699 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":703 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (PyList_CheckExact(__pyx_v_indices) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 703; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; } for (;;) { if (!__pyx_t_8 && PyList_CheckExact(__pyx_t_3)) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 703; __pyx_clineno = __LINE__; goto __pyx_L1_error;} 
#else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 703; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else if (!__pyx_t_8 && PyTuple_CheckExact(__pyx_t_3)) { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 703; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 703; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else {__pyx_filename = __pyx_f[1]; __pyx_lineno = 703; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":704 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (__Pyx_PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":708 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 708; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":705 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 705; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } /* "View.MemoryView":711 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":712 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":713 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":714 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1; /* 
"View.MemoryView":715 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); goto __pyx_L6; } /*else*/ { /* "View.MemoryView":717 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 717; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 717; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_INCREF(__pyx_int_0); __pyx_t_12 = __pyx_int_0; } else { __pyx_t_12 = __pyx_t_9; __pyx_t_9 = 0; } __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_12); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 717; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":718 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_12); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_12); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __Pyx_INCREF(__pyx_int_0); __pyx_t_9 = __pyx_int_0; } else { __pyx_t_9 = __pyx_t_12; __pyx_t_12 = 0; } __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 718; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":719 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_INCREF(__pyx_int_0); __pyx_t_12 = __pyx_int_0; } else { __pyx_t_12 = __pyx_t_9; __pyx_t_9 = 0; } __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_12); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 719; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":721 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 721; __pyx_clineno = __LINE__; goto 
__pyx_L1_error;} __Pyx_GOTREF(__pyx_t_12); __pyx_t_1 = (__pyx_t_12 != Py_None); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":722 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 722; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_12); __pyx_t_1 = (__pyx_t_12 != Py_None); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":723 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 723; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_12); __pyx_t_1 = (__pyx_t_12 != Py_None); __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":725 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":731 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":733 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type)); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":734 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":735 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 735; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":736 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 736; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":734 * * if isinstance(memview, _memoryviewslice): * return 
memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 734; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 734; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /*else*/ { /* "View.MemoryView":739 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":740 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 739; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":739 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 739; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":667 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_12); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":764 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":784 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":786 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 
<= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":787 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); goto __pyx_L4; } __pyx_L4:; /* "View.MemoryView":788 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":789 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, __pyx_k_Index_out_of_bounds_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":792 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ __pyx_t_2 = (__pyx_v_have_step != 0); if (__pyx_t_2) { __pyx_t_1 = (__pyx_v_step < 0); __pyx_t_4 = __pyx_t_1; } else { __pyx_t_4 = __pyx_t_2; } __pyx_v_negative_step = __pyx_t_4; /* "View.MemoryView":794 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ if ((__pyx_v_have_step != 0)) { __pyx_t_4 = (__pyx_v_step == 0); __pyx_t_2 = __pyx_t_4; } else { __pyx_t_2 = (__pyx_v_have_step != 0); } if (__pyx_t_2) { /* "View.MemoryView":795 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, __pyx_k_Step_may_not_be_zero_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L6; } __pyx_L6:; /* "View.MemoryView":798 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":799 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":800 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":801 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":802 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; goto __pyx_L9; } __pyx_L9:; goto __pyx_L8; } /* "View.MemoryView":803 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":804 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":805 * elif start >= shape: * if negative_step: * start = shape - 1 # 
<<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); goto __pyx_L10; } /*else*/ { /* "View.MemoryView":807 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_start = __pyx_v_shape; } __pyx_L10:; goto __pyx_L8; } __pyx_L8:; goto __pyx_L7; } /*else*/ { /* "View.MemoryView":809 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":810 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); goto __pyx_L11; } /*else*/ { /* "View.MemoryView":812 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ __pyx_v_start = 0; } __pyx_L11:; } __pyx_L7:; /* "View.MemoryView":814 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":815 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":816 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":817 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":818 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; goto __pyx_L14; } __pyx_L14:; goto __pyx_L13; } /* "View.MemoryView":819 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":820 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; goto __pyx_L13; } __pyx_L13:; goto __pyx_L12; } /*else*/ { /* "View.MemoryView":822 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":823 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1; goto __pyx_L15; } /*else*/ { /* "View.MemoryView":825 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ __pyx_v_stop = __pyx_v_shape; } __pyx_L15:; } __pyx_L12:; /* "View.MemoryView":827 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":828 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; goto __pyx_L16; } __pyx_L16:; /* "View.MemoryView":832 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":834 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":835 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); goto __pyx_L17; } __pyx_L17:; /* 
"View.MemoryView":837 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":838 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; goto __pyx_L18; } __pyx_L18:; /* "View.MemoryView":841 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":842 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":843 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":846 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); goto __pyx_L19; } /*else*/ { /* "View.MemoryView":849 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L19:; /* "View.MemoryView":851 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":852 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":853 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":854 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); goto __pyx_L22; } /*else*/ { /* "View.MemoryView":856 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, __pyx_k_All_dimensions_preceding_dimensi, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 856; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L22:; goto __pyx_L21; } /*else*/ { /* "View.MemoryView":859 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L21:; goto __pyx_L20; } __pyx_L20:; /* "View.MemoryView":861 * suboffset_dim[0] = new_ndim * * return 
0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":764 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":867 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":869 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1; /* "View.MemoryView":870 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":873 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":874 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[1]; __pyx_lineno = 874; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } else if (sizeof(Py_ssize_t) == sizeof(long) && unlikely(__pyx_v_itemsize == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif {__pyx_filename = __pyx_f[1]; __pyx_lineno = 874; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_v_shape = (__pyx_v_view->len / __pyx_v_itemsize); /* "View.MemoryView":875 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":877 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ __pyx_v_shape = 
(__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":878 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":879 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":880 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); goto __pyx_L4; } __pyx_L4:; } __pyx_L3:; /* "View.MemoryView":882 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":883 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":884 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":885 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 885; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 885; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 885; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 885; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 885; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } goto __pyx_L5; } __pyx_L5:; /* "View.MemoryView":887 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":888 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_4); if 
(unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 888; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":890 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":891 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":892 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); goto __pyx_L8; } __pyx_L8:; /* "View.MemoryView":894 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":867 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":900 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":901 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":903 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":904 * * cdef 
Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":908 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = (__pyx_v_ndim / 2); for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_3; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":909 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":910 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_4 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_5 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_4; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":911 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_5 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_4 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_5; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_4; /* "View.MemoryView":913 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_6 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_6) { __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_8 = __pyx_t_7; } else { __pyx_t_8 = __pyx_t_6; } if (__pyx_t_8) { /* "View.MemoryView":914 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, __pyx_k_Cannot_transpose_memoryview_with); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 914; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L5; } __pyx_L5:; } /* "View.MemoryView":916 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":900 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":933 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); 
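/* The Python-level __dealloc__ wrapper only casts `self` to the extension-type
   struct and delegates to the C-level implementation below, which releases the
   owned slice via __PYX_XDEC_MEMVIEW(&self->from_slice, 1). */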
__pyx_memoryviewslice_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":934 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":933 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":936 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":937 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":938 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 938; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /*else*/ { /* "View.MemoryView":940 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_vtabptr_memoryview->convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 940; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":936 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":942 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct 
__pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":943 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":944 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 944; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } /*else*/ { /* "View.MemoryView":946 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * property base: */ __pyx_t_3 = __pyx_vtabptr_memoryview->assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 946; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":942 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":950 * property base: * @cname('__pyx_memoryviewslice__get__base') * def __get__(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_memoryviewslice__get__base_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryviewslice__get__base_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":951 * @cname('__pyx_memoryviewslice__get__base') * def __get__(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":950 * property base: * @cname('__pyx_memoryviewslice__get__base') * def __get__(self): # <<<<<<<<<<<<<< * return 
self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":957 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":966 * cdef int i * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":967 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; } /* "View.MemoryView":972 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 972; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 972; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_GIVEREF(Py_None); __Pyx_INCREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryviewslice_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 972; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":974 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":975 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":977 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 977; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); 
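/* Hand the freshly fetched `.base` reference (held in __pyx_t_2) over to
   result->from_object: the old reference is dropped first, then the new one
   is stored, keeping the reference counts balanced. */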
__Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":978 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":980 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":981 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":982 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":983 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":984 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * result.flags = PyBUF_RECORDS */ Py_INCREF(Py_None); /* "View.MemoryView":986 * Py_INCREF(Py_None) * * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":988 * result.flags = PyBUF_RECORDS * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":989 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":990 * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":992 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for i in range(ndim): * result.view.len *= result.view.shape[i] */ __pyx_t_6 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_6; /* "View.MemoryView":993 * * result.view.len = result.view.itemsize * for i in range(ndim): # <<<<<<<<<<<<<< * result.view.len *= result.view.shape[i] * */ __pyx_t_7 = __pyx_v_ndim; for (__pyx_t_8 = 
0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) { __pyx_v_i = __pyx_t_8; /* "View.MemoryView":994 * result.view.len = result.view.itemsize * for i in range(ndim): * result.view.len *= result.view.shape[i] # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_v_result->__pyx_base.view.len = (__pyx_v_result->__pyx_base.view.len * (__pyx_v_result->__pyx_base.view.shape[__pyx_v_i])); } /* "View.MemoryView":996 * result.view.len *= result.view.shape[i] * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":997 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":999 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":957 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1002 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1005 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type)); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1006 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1006; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1007 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; } /*else*/ { /* "View.MemoryView":1009 * return &obj.from_slice * else: * slice_copy(memview, mslice) # 
<<<<<<<<<<<<<< * return mslice * */ __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1010 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1002 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 0); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1013 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1017 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1018 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1019 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1021 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1022 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1024 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_dim = __pyx_t_3; /* "View.MemoryView":1025 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * if suboffsets == NULL: */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1026 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * if suboffsets == NULL: * dst.suboffsets[dim] = -1 */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1027 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * if 
suboffsets == NULL: # <<<<<<<<<<<<<< * dst.suboffsets[dim] = -1 * else: */ __pyx_t_4 = ((__pyx_v_suboffsets == NULL) != 0); if (__pyx_t_4) { /* "View.MemoryView":1028 * dst.strides[dim] = strides[dim] * if suboffsets == NULL: * dst.suboffsets[dim] = -1 # <<<<<<<<<<<<<< * else: * dst.suboffsets[dim] = suboffsets[dim] */ (__pyx_v_dst->suboffsets[__pyx_v_dim]) = -1; goto __pyx_L5; } /*else*/ { /* "View.MemoryView":1030 * dst.suboffsets[dim] = -1 * else: * dst.suboffsets[dim] = suboffsets[dim] # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ (__pyx_v_dst->suboffsets[__pyx_v_dim]) = (__pyx_v_suboffsets[__pyx_v_dim]); } __pyx_L5:; } /* "View.MemoryView":1013 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1033 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1036 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1037 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1037; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1033 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1040 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
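 *
 * Note: the copy reuses the source's to_object_func / to_dtype_func pointers
 * when the source is a _memoryviewslice; otherwise both are NULL and the
 * generic memoryview conversion path applies (see memoryview_fromslice above).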
*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1047 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type)); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1048 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1049 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":1051 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ __pyx_v_to_object_func = NULL; /* "View.MemoryView":1052 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1054 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1056 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1054; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1040 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1062 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1063 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1064 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; } /*else*/ { /* "View.MemoryView":1066 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1062 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1069 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. */ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1074 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1075 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1077 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1078 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1079 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1080 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; } } __pyx_L4_break:; /* "View.MemoryView":1082 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_1; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1083 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1084 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = 
(__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1085 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; } } __pyx_L7_break:; /* "View.MemoryView":1087 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1088 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; } /*else*/ { /* "View.MemoryView":1090 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1069 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1093 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; /* "View.MemoryView":1100 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1101 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1102 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1103 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1105 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1106 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_1 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_1) { __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { /* 
"View.MemoryView":1107 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_3 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_3) { __pyx_t_3 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_4 = (__pyx_t_3 != 0); } else { __pyx_t_4 = __pyx_t_2; } __pyx_t_2 = __pyx_t_4; } else { __pyx_t_2 = __pyx_t_1; } if (__pyx_t_2) { /* "View.MemoryView":1108 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)); goto __pyx_L4; } /*else*/ { /* "View.MemoryView":1110 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ __pyx_t_5 = __pyx_v_dst_extent; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1111 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize); /* "View.MemoryView":1112 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1113 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; goto __pyx_L3; } /*else*/ { /* "View.MemoryView":1115 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ __pyx_t_5 = __pyx_v_dst_extent; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1116 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1120 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1121 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1093 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1123 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) 
nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1126 * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1123 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1130 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1133 * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i * cdef Py_ssize_t size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1135 * cdef Py_ssize_t size = src.memview.view.itemsize * * for i in range(ndim): # <<<<<<<<<<<<<< * size *= src.shape[i] * */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1136 * * for i in range(ndim): * size *= src.shape[i] # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i])); } /* "View.MemoryView":1138 * size *= src.shape[i] * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1130 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1141 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1150 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1151 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_idx = __pyx_t_3; /* "View.MemoryView":1152 * if order == 'F': * for idx in range(ndim): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * else: */ 
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1153 * for idx in range(ndim): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } goto __pyx_L3; } /*else*/ { /* "View.MemoryView":1155 * stride = stride * shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1156 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1157 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1159 * stride = stride * shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1141 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1162 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1173 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1174 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1176 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1177 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1178 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1178; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L3; } __pyx_L3:; /* "View.MemoryView":1181 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* 
"View.MemoryView":1182 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1183 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1184 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1185 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1; } /* "View.MemoryView":1187 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ __pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order); /* "View.MemoryView":1191 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1192 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1193 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src, order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; goto __pyx_L8; } __pyx_L8:; } /* "View.MemoryView":1195 * tmpslice.strides[i] = 0 * * if slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1196 * * if slice_is_contig(src, order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size); goto __pyx_L9; } /*else*/ { /* "View.MemoryView":1198 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1200 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":1162 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; 
__pyx_L0:; return __pyx_r; } /* "View.MemoryView":1205 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1208 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1208; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1207 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1207; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1205 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; 
__Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1211 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1212 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_1 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1212; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1212; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyUnicode_Format(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1212; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1212; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_v_error, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1212; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1212; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1211 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1215 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; 
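/* _err raises `error` with the ASCII-decoded message when one is supplied
   (otherwise it re-raises the bare error object) and always returns -1;
   like the other error helpers here it runs `with gil`. */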
PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1216 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":1217 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_v_error, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1217; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /*else*/ { /* "View.MemoryView":1219 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ __Pyx_Raise(__pyx_v_error, 0, 0, 0); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1219; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } /* "View.MemoryView":1215 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1222 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; void *__pyx_t_6; int __pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1230 * Check for overlapping memory and verify the shapes. 
* """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1231 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1233 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1234 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1235 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1238 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1239 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); goto __pyx_L3; } /* "View.MemoryView":1240 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1241 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); goto __pyx_L3; } __pyx_L3:; /* "View.MemoryView":1243 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1245 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_5; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1246 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); if (__pyx_t_2) { /* "View.MemoryView":1247 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1248 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # 
<<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1249 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; goto __pyx_L7; } /*else*/ { /* "View.MemoryView":1251 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ __pyx_t_4 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1251; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } __pyx_L7:; goto __pyx_L6; } __pyx_L6:; /* "View.MemoryView":1253 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1254 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_4 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, __pyx_k_Dimension_d_is_not_direct, __pyx_v_i); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1254; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L8; } __pyx_L8:; } /* "View.MemoryView":1256 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(&src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1258 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(&src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig((&__pyx_v_src), __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1259 * * if not slice_is_contig(&src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); goto __pyx_L10; } __pyx_L10:; /* "View.MemoryView":1261 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_6 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1261; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_v_tmpdata = __pyx_t_6; /* "View.MemoryView":1262 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; goto __pyx_L9; } __pyx_L9:; /* "View.MemoryView":1264 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1267 * * * if slice_is_contig(&src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(&dst, 'C', ndim) * elif slice_is_contig(&src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig((&__pyx_v_src), 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1268 * * if slice_is_contig(&src, 'C', ndim): * direct_copy = slice_is_contig(&dst, 'C', ndim) # 
<<<<<<<<<<<<<< * elif slice_is_contig(&src, 'F', ndim): * direct_copy = slice_is_contig(&dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig((&__pyx_v_dst), 'C', __pyx_v_ndim); goto __pyx_L12; } /* "View.MemoryView":1269 * if slice_is_contig(&src, 'C', ndim): * direct_copy = slice_is_contig(&dst, 'C', ndim) * elif slice_is_contig(&src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(&dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig((&__pyx_v_src), 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1270 * direct_copy = slice_is_contig(&dst, 'C', ndim) * elif slice_is_contig(&src, 'F', ndim): * direct_copy = slice_is_contig(&dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig((&__pyx_v_dst), 'F', __pyx_v_ndim); goto __pyx_L12; } __pyx_L12:; /* "View.MemoryView":1272 * direct_copy = slice_is_contig(&dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1274 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1275 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)); /* "View.MemoryView":1276 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1277 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1278 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; } goto __pyx_L11; } __pyx_L11:; /* "View.MemoryView":1280 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_7 = (__pyx_t_2 != 0); if (__pyx_t_7) { /* "View.MemoryView":1283 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1283; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":1284 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 1284; __pyx_clineno = __LINE__; goto __pyx_L1_error;} goto __pyx_L14; } __pyx_L14:; /* "View.MemoryView":1286 * transpose_memslice(&dst) * * 
refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1287 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1288 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1290 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1291 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1222 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1294 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *slice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_slice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; /* "View.MemoryView":1298 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1300 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * slice.shape[i + offset] = slice.shape[i] * slice.strides[i + offset] = slice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1301 * * for i in range(ndim - 1, -1, -1): * slice.shape[i + offset] = slice.shape[i] # <<<<<<<<<<<<<< * slice.strides[i + offset] = slice.strides[i] * slice.suboffsets[i + offset] = slice.suboffsets[i] */ (__pyx_v_slice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_slice->shape[__pyx_v_i]); /* "View.MemoryView":1302 * for i in range(ndim - 1, -1, -1): * slice.shape[i + offset] = slice.shape[i] * slice.strides[i + offset] = slice.strides[i] # <<<<<<<<<<<<<< * slice.suboffsets[i + offset] = slice.suboffsets[i] * */ (__pyx_v_slice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_slice->strides[__pyx_v_i]); /* "View.MemoryView":1303 * slice.shape[i + offset] = slice.shape[i] * slice.strides[i + offset] = slice.strides[i] * slice.suboffsets[i + offset] = slice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_slice->suboffsets[(__pyx_v_i + 
__pyx_v_offset)]) = (__pyx_v_slice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1305 * slice.suboffsets[i + offset] = slice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * slice.shape[i] = 1 * slice.strides[i] = slice.strides[0] */ __pyx_t_1 = __pyx_v_offset; for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1306 * * for i in range(offset): * slice.shape[i] = 1 # <<<<<<<<<<<<<< * slice.strides[i] = slice.strides[0] * slice.suboffsets[i] = -1 */ (__pyx_v_slice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1307 * for i in range(offset): * slice.shape[i] = 1 * slice.strides[i] = slice.strides[0] # <<<<<<<<<<<<<< * slice.suboffsets[i] = -1 * */ (__pyx_v_slice->strides[__pyx_v_i]) = (__pyx_v_slice->strides[0]); /* "View.MemoryView":1308 * slice.shape[i] = 1 * slice.strides[i] = slice.strides[0] * slice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_slice->suboffsets[__pyx_v_i]) = -1; } /* "View.MemoryView":1294 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *slice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1316 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1320 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1321 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); goto __pyx_L3; } __pyx_L3:; /* "View.MemoryView":1316 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1325 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1328 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1325 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); 
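/* end of the `with gil` wrapper: the GIL taken at function entry is
   released below, after the recursive refcount walk has finished. */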
#ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1331 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; int __pyx_t_3; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1335 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1336 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_3 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_3) { /* "View.MemoryView":1337 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_3 = (__pyx_v_inc != 0); if (__pyx_t_3) { /* "View.MemoryView":1338 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); goto __pyx_L6; } /*else*/ { /* "View.MemoryView":1340 * Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; goto __pyx_L5; } /*else*/ { /* "View.MemoryView":1342 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1345 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1331 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1351 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1354 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1355 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ 
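/* object refcounts in dst were dropped just above (when dtype_is_object)
   so the memcpy-based fill below can overwrite the slots; refcount_copying
   is called again afterwards to take ownership of the newly stored item. */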
__pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1357 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1351 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1361 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; /* "View.MemoryView":1365 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1366 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1368 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1369 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1370 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize); /* "View.MemoryView":1371 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } goto __pyx_L3; } /*else*/ { /* "View.MemoryView":1373 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1374 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1376 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1361 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t 
*strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) { Py_DECREF(o); o = 0; } return o; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_array___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return get_memview(o); } static PyMethodDef __pyx_methods_array[] = { {__Pyx_NAMESTR("__getattr__"), (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, __Pyx_DOCSTR(0)}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, 0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { 0, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { 0, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif #if PY_VERSION_HEX >= 0x02060000 __pyx_array_getbuffer, /*bf_getbuffer*/ #endif #if PY_VERSION_HEX >= 0x02060000 0, /*bf_releasebuffer*/ #endif }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) __Pyx_NAMESTR("glove.glove_cython.array"), /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #else 0, /*reserved*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ 
&__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ #if PY_VERSION_HEX >= 0x02060000 0, /*tp_version_tag*/ #endif #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) __Pyx_NAMESTR("glove.glove_cython.Enum"), /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #else 0, /*reserved*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 
#if PY_VERSION_HEX >= 0x02060000 0, /*tp_version_tag*/ #endif #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) { Py_DECREF(o); o = 0; } return o; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_transpose(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview__get__base(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_shape(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_strides(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_suboffsets(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_ndim(o); } 
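/* The __pyx_getprop_* thunks above and below simply forward to the
   corresponding memoryview getters; they back the read-only properties
   (T, base, shape, strides, suboffsets, ndim, itemsize, nbytes, size)
   wired into the PyGetSetDef table __pyx_getsets_memoryview further down. */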
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_itemsize(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_nbytes(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryview_get_size(o); } static PyMethodDef __pyx_methods_memoryview[] = { {__Pyx_NAMESTR("is_c_contig"), (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, __Pyx_DOCSTR(0)}, {__Pyx_NAMESTR("is_f_contig"), (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, __Pyx_DOCSTR(0)}, {__Pyx_NAMESTR("copy"), (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, __Pyx_DOCSTR(0)}, {__Pyx_NAMESTR("copy_fortran"), (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, __Pyx_DOCSTR(0)}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, 0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, 0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, 0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, 0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, 0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, 0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, 0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, 0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, 0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif #if PY_VERSION_HEX >= 0x02060000 __pyx_memoryview_getbuffer, /*bf_getbuffer*/ #endif #if PY_VERSION_HEX >= 0x02060000 0, /*bf_releasebuffer*/ #endif }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) __Pyx_NAMESTR("glove.glove_cython.memoryview"), /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #else 0, /*reserved*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, 
/*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ #if PY_VERSION_HEX >= 0x02060000 0, /*tp_version_tag*/ #endif #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if PY_VERSION_HEX >= 0x030400a1 if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_memoryviewslice__get__base(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, 0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) __Pyx_NAMESTR("glove.glove_cython._memoryviewslice"), /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #else 0, /*reserved*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ __Pyx_DOCSTR("Internal class for passing memoryview slices to Python"), /*tp_doc*/ 
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ #if PY_VERSION_HEX >= 0x02060000 0, /*tp_version_tag*/ #endif #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif __Pyx_NAMESTR("glove_cython"), 0, /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s__12, __pyx_k__12, sizeof(__pyx_k__12), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_alpha, __pyx_k_alpha, sizeof(__pyx_k_alpha), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, 
__pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_col, __pyx_k_col, sizeof(__pyx_k_col), 0, 0, 1, 1}, {&__pyx_n_s_collections, __pyx_k_collections, sizeof(__pyx_k_collections), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_count, __pyx_k_count, sizeof(__pyx_k_count), 0, 0, 1, 1}, {&__pyx_n_s_counts, __pyx_k_counts, sizeof(__pyx_k_counts), 0, 0, 1, 1}, {&__pyx_n_s_dim, __pyx_k_dim, sizeof(__pyx_k_dim), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_entry_weight, __pyx_k_entry_weight, sizeof(__pyx_k_entry_weight), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_epoch, __pyx_k_epoch, sizeof(__pyx_k_epoch), 0, 0, 1, 1}, {&__pyx_n_s_epochs, __pyx_k_epochs, sizeof(__pyx_k_epochs), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_fit_vectors, __pyx_k_fit_vectors, sizeof(__pyx_k_fit_vectors), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_glove_glove_cython, __pyx_k_glove_glove_cython, sizeof(__pyx_k_glove_glove_cython), 0, 0, 1, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_gradient, __pyx_k_gradient, sizeof(__pyx_k_gradient), 0, 0, 1, 1}, {&__pyx_kp_s_home_chandras_Desktop_phrase2ve, __pyx_k_home_chandras_Desktop_phrase2ve, sizeof(__pyx_k_home_chandras_Desktop_phrase2ve), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_initial_learning_rate, __pyx_k_initial_learning_rate, sizeof(__pyx_k_initial_learning_rate), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_learning_rate, __pyx_k_learning_rate, sizeof(__pyx_k_learning_rate), 0, 0, 1, 1}, {&__pyx_n_s_loss, __pyx_k_loss, sizeof(__pyx_k_loss), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_max_count, __pyx_k_max_count, sizeof(__pyx_k_max_count), 0, 0, 1, 1}, {&__pyx_n_s_max_loss, __pyx_k_max_loss, sizeof(__pyx_k_max_loss), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_no_cooccurrences, __pyx_k_no_cooccurrences, sizeof(__pyx_k_no_cooccurrences), 0, 0, 1, 
1}, {&__pyx_n_s_no_threads, __pyx_k_no_threads, sizeof(__pyx_k_no_threads), 0, 0, 1, 1}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_paragraphvec, __pyx_k_paragraphvec, sizeof(__pyx_k_paragraphvec), 0, 0, 1, 1}, {&__pyx_n_s_prediction, __pyx_k_prediction, sizeof(__pyx_k_prediction), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_releasebuffer, __pyx_k_pyx_releasebuffer, sizeof(__pyx_k_pyx_releasebuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_row, __pyx_k_row, sizeof(__pyx_k_row), 0, 0, 1, 1}, {&__pyx_n_s_scipy_sparse, __pyx_k_scipy_sparse, sizeof(__pyx_k_scipy_sparse), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_shuffle_index, __pyx_k_shuffle_index, sizeof(__pyx_k_shuffle_index), 0, 0, 1, 1}, {&__pyx_n_s_shuffle_indices, __pyx_k_shuffle_indices, sizeof(__pyx_k_shuffle_indices), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_sp, __pyx_k_sp, sizeof(__pyx_k_sp), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_sum_gradients, __pyx_k_sum_gradients, sizeof(__pyx_k_sum_gradients), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_transform_paragraph, __pyx_k_transform_paragraph, sizeof(__pyx_k_transform_paragraph), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_word_a, __pyx_k_word_a, sizeof(__pyx_k_word_a), 0, 0, 1, 1}, {&__pyx_n_s_word_b, __pyx_k_word_b, sizeof(__pyx_k_word_b), 0, 0, 1, 1}, {&__pyx_n_s_wordbias, __pyx_k_wordbias, sizeof(__pyx_k_wordbias), 0, 0, 1, 1}, {&__pyx_n_s_wordbias_sum_gradients, __pyx_k_wordbias_sum_gradients, sizeof(__pyx_k_wordbias_sum_gradients), 0, 0, 1, 1}, {&__pyx_n_s_wordvec, __pyx_k_wordvec, sizeof(__pyx_k_wordvec), 0, 0, 1, 1}, {&__pyx_n_s_wordvec_sum_gradients, __pyx_k_wordvec_sum_gradients, sizeof(__pyx_k_wordvec_sum_gradients), 0, 0, 1, 1}, {&__pyx_n_s_xrange, __pyx_k_xrange, sizeof(__pyx_k_xrange), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 70; __pyx_clineno = 
__LINE__; goto __pyx_L1_error;} __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 357; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 386; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION >= 3 __pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #else __pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_xrange); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 514; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 569; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 789; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "View.MemoryView":127 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple_)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "View.MemoryView":130 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if isinstance(format, unicode): */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "View.MemoryView":142 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "View.MemoryView":170 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 
170; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "View.MemoryView":186 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 186; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "View.MemoryView":445 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 445; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "View.MemoryView":521 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([self.view.strides[i] for i in xrange(self.view.ndim)]) */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":638 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_tuple__8 = PyTuple_Pack(1, Py_None); if (unlikely(!__pyx_tuple__8)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 638; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":641 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ __pyx_tuple__9 = PyTuple_Pack(1, Py_None); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 641; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":652 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_tuple__10 = PyTuple_Pack(1, Py_None); if (unlikely(!__pyx_tuple__10)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 652; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":660 * for i in range(ndim): * if suboffsets[i] >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__11)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "glove/glove_cython.pyx":20 * * * def fit_vectors(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[:, ::1] wordvec_sum_gradients, * double[::1] wordbias, */ __pyx_tuple__13 = PyTuple_Pack(26, __pyx_n_s_wordvec, __pyx_n_s_wordvec_sum_gradients, __pyx_n_s_wordbias, __pyx_n_s_wordbias_sum_gradients, __pyx_n_s_row, __pyx_n_s_col, 
__pyx_n_s_counts, __pyx_n_s_shuffle_indices, __pyx_n_s_initial_learning_rate, __pyx_n_s_max_count, __pyx_n_s_alpha, __pyx_n_s_max_loss, __pyx_n_s_no_threads, __pyx_n_s_dim, __pyx_n_s_no_cooccurrences, __pyx_n_s_word_a, __pyx_n_s_word_b, __pyx_n_s_count, __pyx_n_s_learning_rate, __pyx_n_s_gradient, __pyx_n_s_prediction, __pyx_n_s_entry_weight, __pyx_n_s_loss, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_shuffle_index); if (unlikely(!__pyx_tuple__13)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(13, 0, 26, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_chandras_Desktop_phrase2ve, __pyx_n_s_fit_vectors, 20, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "glove/glove_cython.pyx":111 * * * def transform_paragraph(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[::1] wordbias, * double[::1] paragraphvec, */ __pyx_tuple__15 = PyTuple_Pack(25, __pyx_n_s_wordvec, __pyx_n_s_wordbias, __pyx_n_s_paragraphvec, __pyx_n_s_sum_gradients, __pyx_n_s_row, __pyx_n_s_counts, __pyx_n_s_shuffle_indices, __pyx_n_s_initial_learning_rate, __pyx_n_s_max_count, __pyx_n_s_alpha, __pyx_n_s_epochs, __pyx_n_s_dim, __pyx_n_s_no_cooccurrences, __pyx_n_s_word_b, __pyx_n_s_word_a, __pyx_n_s_count, __pyx_n_s_prediction, __pyx_n_s_entry_weight, __pyx_n_s_loss, __pyx_n_s_gradient, __pyx_n_s_epoch, __pyx_n_s_i, __pyx_n_s_j, __pyx_n_s_shuffle_index, __pyx_n_s_learning_rate); if (unlikely(!__pyx_tuple__15)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); __pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(11, 0, 25, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_chandras_Desktop_phrase2ve, __pyx_n_s_transform_paragraph, 111, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /* "View.MemoryView":276 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__17)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "View.MemoryView":277 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__18)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "View.MemoryView":278 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__19 = PyTuple_Pack(1, 
__pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__19)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); /* "View.MemoryView":281 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__20)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__20); __Pyx_GIVEREF(__pyx_tuple__20); /* "View.MemoryView":282 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__21)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initglove_cython(void); /*proto*/ PyMODINIT_FUNC initglove_cython(void) #else PyMODINIT_FUNC PyInit_glove_cython(void); /*proto*/ PyMODINIT_FUNC PyInit_glove_cython(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_glove_cython(void)", 0); if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #ifdef __Pyx_CyFunction_USED if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; 
__pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4(__Pyx_NAMESTR("glove_cython"), __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; /*--- Initialize various global constants etc. ---*/ if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} #endif if (__pyx_module_is_main_glove__glove_cython) { if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}; } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} if (!PyDict_GetItemString(modules, "glove.glove_cython")) { if (unlikely(PyDict_SetItemString(modules, "glove.glove_cython", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} } } #endif /*--- Builtin init code ---*/ if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Constants init code ---*/ if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ if (PyType_Ready(&__pyx_type___pyx_array) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_type___pyx_array.tp_print = 0; __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 269; __pyx_clineno 
= __LINE__; goto __pyx_L1_error;} __pyx_type___pyx_MemviewEnum.tp_print = 0; __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 302; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_type___pyx_memoryview.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 302; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 922; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_type___pyx_memoryviewslice.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 922; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; /*--- Type import code ---*/ /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ /* "glove/glove_cython.pyx":4 * #cython: boundscheck=False, wraparound=False, cdivision=True, initializedcheck=False * * import numpy as np # <<<<<<<<<<<<<< * import scipy.sparse as sp * import collections */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 4; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "glove/glove_cython.pyx":5 * * import numpy as np * import scipy.sparse as sp # <<<<<<<<<<<<<< * import collections * from cython.parallel import 
parallel, prange */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 5; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s__12); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s__12); __Pyx_GIVEREF(__pyx_n_s__12); __pyx_t_2 = __Pyx_Import(__pyx_n_s_scipy_sparse, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 5; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (PyDict_SetItem(__pyx_d, __pyx_n_s_sp, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 5; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "glove/glove_cython.pyx":6 * import numpy as np * import scipy.sparse as sp * import collections # <<<<<<<<<<<<<< * from cython.parallel import parallel, prange * */ __pyx_t_2 = __Pyx_Import(__pyx_n_s_collections, 0, -1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_collections, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 6; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "glove/glove_cython.pyx":20 * * * def fit_vectors(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[:, ::1] wordvec_sum_gradients, * double[::1] wordbias, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_5glove_12glove_cython_1fit_vectors, NULL, __pyx_n_s_glove_glove_cython); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_fit_vectors, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 20; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "glove/glove_cython.pyx":111 * * * def transform_paragraph(double[:, ::1] wordvec, # <<<<<<<<<<<<<< * double[::1] wordbias, * double[::1] paragraphvec, */ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_5glove_12glove_cython_3transform_paragraph, NULL, __pyx_n_s_glove_glove_cython); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_transform_paragraph, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 111; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "glove/glove_cython.pyx":1 * #!python # <<<<<<<<<<<<<< * #cython: boundscheck=False, wraparound=False, cdivision=True, initializedcheck=False * */ __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":203 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); 
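/* Note (annotation, not Cython-generated): the module-body code above has
 * registered this module's two Python-callable entry points in the module
 * dict.  Their Python-level signatures can be read off the name tuples and
 * code objects built earlier (__pyx_tuple__13 / __pyx_codeobj__14 and
 * __pyx_tuple__15 / __pyx_codeobj__16):
 *
 *   fit_vectors(wordvec, wordvec_sum_gradients, wordbias,
 *               wordbias_sum_gradients, row, col, counts, shuffle_indices,
 *               initial_learning_rate, max_count, alpha, max_loss,
 *               no_threads)                         -- 13 positional args
 *   transform_paragraph(wordvec, wordbias, paragraphvec, sum_gradients,
 *               row, counts, shuffle_indices, initial_learning_rate,
 *               max_count, alpha, epochs)           -- 11 positional args
 *
 * Per the quoted .pyx declarations (double[:, ::1], double[::1]), the array
 * arguments are typed memoryviews over C-contiguous float64 buffers.
 * The statement that follows installs the getbuffer capsule created just
 * above into the array type's dict under the name __pyx_getbuffer. */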
if (PyDict_SetItem(__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 203; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":276 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":277 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":278 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 278; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":281 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 281; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":282 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":496 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 496; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_memoryview_type->tp_dict, 
__pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 496; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":953 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 953; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_2) < 0) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 953; __pyx_clineno = __LINE__; goto __pyx_L1_error;} __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "View.MemoryView":1361 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { __Pyx_AddTraceback("init glove.glove_cython", __pyx_clineno, __pyx_lineno, __pyx_filename); Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init glove.glove_cython"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* Runtime support code */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* CYTHON_REFNANNY */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) /* First char was not a digit */ PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 
16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; /* Consume from buffer string */ while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; /* breaks both loops as ctx->enc_count == 0 */ } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; /* empty struct */ field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; /* not a 'break' in the loop */ } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { 
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': /* substruct */ { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': /* end of substruct; either repeat or move on */ { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; /* Erase processed last struct element */ if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { 
__Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (!buf) { PyErr_SetString(PyExc_ValueError, "buf is NULL."); goto fail; } else if (memviewslice->memview || memviewslice->data) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } static CYTHON_INLINE void __pyx_fatalerror(const char *fmt, ...) 
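/* Formats the message into a fixed 200-byte stack buffer with vsnprintf and
 * then calls Py_FatalError, which aborts the process; this helper never
 * returns to its caller. */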
{ va_list vargs; char msg[200]; va_start(vargs, fmt); #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); Py_FatalError(msg); va_end(vargs); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview || (PyObject *) memview == Py_None) return; /* allow uninitialized memoryview assignment */ if (__pyx_get_slice_count(memview) < 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (first_time) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview ) { return; } else if ((PyObject *) memview == Py_None) { memslice->memview = NULL; return; } if (__pyx_get_slice_count(memview) <= 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (last_time) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); #if PY_VERSION_HEX >= 0x02060000 if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; #endif result = (*call)(func, arg, kw); #if PY_VERSION_HEX >= 0x02060000 Py_LeaveRecursiveCall(); #endif if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } 
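/* End of the CPython-only __Pyx_PyObject_Call fast path (it dispatches
 * directly through tp_call with the recursion guard rather than going via
 * PyObject_Call).  The helpers that follow save and restore the thread
 * state's exception fields (__Pyx_ErrRestore / __Pyx_ErrFetch) and
 * implement the `raise` statement for Python 2 and Python 3 (__Pyx_Raise). */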
#endif static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_Restore(type, value, tb); #endif } static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(type, value, tb); #endif } #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } #if PY_VERSION_HEX < 0x02050000 if (PyClass_Check(type)) { #else if (PyType_Check(type)) { #endif #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; #if PY_VERSION_HEX < 0x02050000 if (PyInstance_Check(type)) { type = (PyObject*) ((PyInstanceObject*)type)->in_class; Py_INCREF(type); } else { type = 0; PyErr_SetString(PyExc_TypeError, "raise: exception must be an old-style class or instance"); goto raise_error; } #else type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } #endif } __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else /* Python 3+ */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { if (PyObject_IsSubclass(instance_class, type)) { type = instance_class; } else { instance_class = NULL; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { 
PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } } bad: Py_XDECREF(owned_instance); return; } #endif static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; #if CYTHON_PEP393_ENABLED if (unlikely(PyUnicode_READY(s1) < 0) || unlikely(PyUnicode_READY(s2) < 0)) return -1; #endif length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; 
} else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, length * kind); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_COMPILING_IN_CPYTHON #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { length = strlen(cstring); if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } length = stop - start; if (unlikely(length <= 0)) return PyUnicode_FromUnicode(NULL, 0); cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb) { #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); #else PyErr_GetExcInfo(type, value, tb); #endif } static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb) { #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(type, value, tb); #endif } static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { PyObject *local_type, *local_value, *local_tb; #if CYTHON_COMPILING_IN_CPYTHON PyObject *tmp_type, *tmp_value, *tmp_tb; PyThreadState *tstate = PyThreadState_GET(); local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_COMPILING_IN_CPYTHON tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_COMPILING_IN_CPYTHON PyThreadState *tstate = PyThreadState_GET(); tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #else PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject 
*o, Py_ssize_t i, int wraparound, int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (PyErr_ExceptionMatches(PyExc_OverflowError)) PyErr_Clear(); else return NULL; } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } } static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0) PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); #endif if (PyObject_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (PyObject_TypeCheck(obj, 
__pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict) { PyObject *getbuffer_cobj = PyObject_GetItem( obj->ob_type->tp_dict, __pyx_n_s_pyx_getbuffer); if (getbuffer_cobj) { getbufferproc func = (getbufferproc) PyCObject_AsVoidPtr(getbuffer_cobj); Py_DECREF(getbuffer_cobj); if (!func) goto fail; return func(obj, view, flags); } else { PyErr_Clear(); } } #endif PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); #if PY_VERSION_HEX < 0x02060000 fail: #endif return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; #if PY_VERSION_HEX >= 0x02060000 if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } #endif #if PY_VERSION_HEX < 0x02060000 if (obj->ob_type->tp_dict) { PyObject *releasebuffer_cobj = PyObject_GetItem( obj->ob_type->tp_dict, __pyx_n_s_pyx_releasebuffer); if (releasebuffer_cobj) { releasebufferproc func = (releasebufferproc) PyCObject_AsVoidPtr(releasebuffer_cobj); Py_DECREF(releasebuffer_cobj); if (!func) goto fail; func(obj, view); return; } else { PyErr_Clear(); } } #endif goto nofail; #if PY_VERSION_HEX < 0x02060000 fail: #endif PyErr_WriteUnraisable(obj); nofail: Py_DECREF(obj); view->obj = NULL; } #endif /* PY_MAJOR_VERSION < 3 */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; #if PY_VERSION_HEX >= 0x02050000 { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; /* try absolute import on failure */ } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } #else if (level>0) { PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4."); goto bad; } module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, NULL); #endif bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else 
{ return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void *)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (spec & (__Pyx_MEMVIEW_PTR)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (buf->suboffsets) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (buf->suboffsets && buf->suboffsets[dim] >= 0) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (!buf->suboffsets || (buf->suboffsets && buf->suboffsets[dim] < 0)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if 
(from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (buf->ndim != ndim) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned) buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (!__pyx_check_strides(buf, i, ndim, spec)) goto fail; if (!__pyx_check_suboffsets(buf, i, ndim, spec)) goto fail; } if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE), 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_double(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE), 1, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT | PyBUF_WRITABLE), 1, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } 
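The three __Pyx_PyObject_to_MemoryviewSlice_* converters above are what the generated argument-unpacking code calls to turn a Python object into a typed memoryview slice; on failure they return a slice whose memview and data pointers are NULL and leave a Python exception set. A minimal sketch of that calling pattern is shown below, as it might appear inside this generated module; the helper name sum_doubles, and the assumption that obj wraps a contiguous 1-D float64 buffer (not None), are illustrative only and not taken from this file.

/* Illustrative sketch only: consume a double[::1]-style argument the way
 * generated code does.  sum_doubles is a hypothetical helper. */
static double sum_doubles(PyObject *obj)
{
    __Pyx_memviewslice xs = { 0, 0, { 0 }, { 0 }, { 0 } };
    double total = 0.0;
    Py_ssize_t i;

    xs = __Pyx_PyObject_to_MemoryviewSlice_dc_double(obj);
    if (!xs.memview || !xs.data) {
        /* conversion failed; a Python exception has already been set */
        return -1.0;
    }
    for (i = 0; i < xs.shape[0]; i++)       /* contiguous 1-D slice */
        total += ((double *) xs.data)[i];

    /* release the reference acquired by the converter */
    __Pyx_XDEC_MEMVIEW(&xs, 1, __LINE__);
    return total;
}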
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func) \ { \ func_type value = func(x); \ if (sizeof(target_type) < sizeof(func_type)) { \ if (unlikely(value != (func_type) (target_type) value)) { \ func_type zero = 0; \ PyErr_SetString(PyExc_OverflowError, \ (is_unsigned && unlikely(value < zero)) ? \ "can't convert negative value to " #target_type : \ "value too large to convert to " #target_type); \ return (target_type) -1; \ } \ } \ return (target_type) value; \ } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(int) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(int)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(int) ((PyLongObject*)x)->ob_digit[0]; case -1: return -(int) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong) } else if (sizeof(int) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } } static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(int) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long) value); } } else { if 
(sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(long long)) { return PyLong_FromLongLong((long long) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice *mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs->memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs->suboffsets[index] >= 0 || mvs->strides[index] != itemsize) return 0; itemsize *= mvs->shape[index]; } return 1; } static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (from_mvs->suboffsets[i] >= 0) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const char 
*sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 0) cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(long) <= sizeof(unsigned long long)) { return PyLong_FromUnsignedLongLong((unsigned long long) value); } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(long long)) { return PyLong_FromLongLong((long long) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(char)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (char) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(char, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(char) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(char, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(char)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(char) ((PyLongObject*)x)->ob_digit[0]; case -1: return -(char) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyLong_AsLong) } else if (sizeof(char) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(char, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = 
__Pyx_PyNumber_Int(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(long)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return (long) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (unlikely(Py_SIZE(x) < 0)) { PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong) } else if (sizeof(long) <= sizeof(unsigned long long)) { __PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong) } } else { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS if (sizeof(digit) <= sizeof(long)) { switch (Py_SIZE(x)) { case 0: return 0; case 1: return +(long) ((PyLongObject*)x)->ob_digit[0]; case -1: return -(long) ((PyLongObject*)x)->ob_digit[0]; } } #endif #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong) } else if (sizeof(long) <= sizeof(long long)) { __PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_Int(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_Int(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } } static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); #if PY_VERSION_HEX < 0x02050000 return PyErr_Warn(NULL, message); #else return PyErr_WarnEx(NULL, message, 1); #endif } return 0; } static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = 
(start + end) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, /*int argcount,*/ 0, /*int kwonlyargcount,*/ 0, /*int nlocals,*/ 0, /*int stacksize,*/ 0, /*int flags,*/ __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, /*int firstlineno,*/ __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; 
bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_globals = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? c_line : py_line, py_code); } py_globals = PyModule_GetDict(__pyx_m); if (!py_globals) goto bad; py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ py_globals, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else /* Python 3+ has unicode identifiers */ if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, strlen(c_str)); } static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif /*__PYX_DEFAULT_STRING_ENCODING_IS_ASCII*/ *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else /* PY_VERSION_HEX < 0x03030000 */ if (PyUnicode_READY(o) == -1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */ return PyUnicode_AsUTF8AndSize(o, length); #endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */ #endif /* PY_VERSION_HEX < 0x03030000 */ } else #endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */ #if !CYTHON_COMPILING_IN_PYPY #if PY_VERSION_HEX >= 0x02060000 if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static 
CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return Py_INCREF(x), x; m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_MAJOR_VERSION < 3 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", name, name, Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #endif #endif static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) return PyInt_AS_LONG(b); #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 #if CYTHON_USE_PYLONG_INTERNALS switch (Py_SIZE(b)) { case -1: return -(sdigit)((PyLongObject*)b)->ob_digit[0]; case 0: return 0; case 1: return ((PyLongObject*)b)->ob_digit[0]; } #endif #endif #if PY_VERSION_HEX < 0x02060000 return PyInt_AsSsize_t(b); #else return PyLong_AsSsize_t(b); #endif } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { #if PY_VERSION_HEX < 0x02050000 if (ival <= LONG_MAX) return PyInt_FromLong((long)ival); else { unsigned char *bytes = (unsigned char *) &ival; int one = 1; int little = (int)*(unsigned char*)&one; return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0); } #else return PyInt_FromSize_t(ival); #endif } #endif /* Py_PYTHON_H */
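The __Pyx_PyInt_As_int / _As_char / _As_long helpers above signal failure in-band: they return (type)-1 with a Python exception set, so a result of -1 is ambiguous and callers are expected to also test PyErr_Occurred(). A small sketch of that convention follows; parse_count is a hypothetical name, not part of this module.

/* Illustrative sketch: disambiguate the -1 error return of
 * __Pyx_PyInt_As_int().  parse_count is a hypothetical helper. */
static int parse_count(PyObject *arg, int *out)
{
    int value = __Pyx_PyInt_As_int(arg);
    if (value == (int) -1 && PyErr_Occurred())
        return -1;          /* conversion failed; exception is set */
    *out = value;
    return 0;
}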
cpbtrs.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zpbtrs.c, normal z -> c, Fri Sep 28 17:38:09 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_pbtrs
 *
 *  Solves a system of linear equations A * X = B with a Hermitian positive
 *  definite band matrix A, using the Cholesky factorization of A
 *  (i.e., A = L*L^H or A = U^H*U) computed by plasma_cpbtrf.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] n
 *          The order of the matrix A. n >= 0.
 *
 * @param[in] kd
 *          The number of superdiagonals within the band of A if uplo is upper,
 *          or the number of subdiagonals if uplo is lower. kd >= 0.
 *
 * @param[in] nrhs
 *          The number of right hand sides, i.e., the number of
 *          columns of the matrix B. nrhs >= 0.
 *
 * @param[in,out] AB
 *          The triangular factor U or L from the Cholesky
 *          factorization A = U^H*U or A = L*L^H, computed by
 *          plasma_cpbtrf.
 *          Remark: If out-of-place layout translation is used, the
 *          matrix A can be considered as input; however, if in-place
 *          layout translation is enabled, the content of A will be
 *          reordered for computation and restored before exiting the
 *          function.
 *
 * @param[in] ldab
 *          The leading dimension of the array AB. ldab >= 1+kd.
 *
 * @param[in,out] B
 *          On entry, the n-by-nrhs right hand side matrix B.
 *          On exit, if return value = 0, the n-by-nrhs solution matrix X.
 *
 * @param[in] ldb
 *          The leading dimension of the array B. ldb >= max(1,n).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 *******************************************************************************
 *
 * @sa plasma_omp_cpbtrs
 * @sa plasma_cpbtrs
 * @sa plasma_dpbtrs
 * @sa plasma_spbtrs
 * @sa plasma_cpbtrf
 *
 ******************************************************************************/
int plasma_cpbtrs(plasma_enum_t uplo,
                  int n, int kd, int nrhs,
                  plasma_complex32_t *pAB, int ldab,
                  plasma_complex32_t *pB,  int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (kd < 0) {
        plasma_error("illegal value of kd");
        return -4;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -5;
    }
    if (ldab < imax(1, 1+kd)) {
        plasma_error("illegal value of ldab");
        return -7;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -10;
    }

    // quick return
    if (imax(n, nrhs) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_trsm(plasma, PlasmaComplexFloat, n, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize tile matrix descriptors.
int lm = nb*(1+(kd+nb-1)/nb); plasma_desc_t AB; plasma_desc_t B; int retval; retval = plasma_desc_general_band_create(PlasmaComplexFloat, uplo, nb, nb, lm, n, 0, 0, n, n, kd, kd, &AB); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_band_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb, n, nrhs, 0, 0, n, nrhs, &B); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&AB); return retval; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_cpb2desc(pAB, ldab, AB, &sequence, &request); plasma_omp_cge2desc(pB, ldb, B, &sequence, &request); // Call the tile async function. plasma_omp_cpbtrs(uplo, AB, B, &sequence, &request); // Translate back to LAPACK layout. plasma_omp_cdesc2ge(B, pB, ldb, &sequence, &request); } // implicit synchronization // Free matrix A in tile layout. plasma_desc_destroy(&AB); plasma_desc_destroy(&B); // Return status. int status = sequence.status; return status; } /***************************************************************************//** * * @ingroup plasma_pbtrs * * Solves a system of linear equations using previously * computed Cholesky factorization. * Non-blocking tile version of plasma_cpbtrs(). * May return before the computation is finished. * Operates on matrices stored by tiles. * All matrices are passed through descriptors. * All dimensions are taken from the descriptors. * Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] AB * The triangular factor U or L from the Cholesky factorization * A = U^H*U or A = L*L^H, computed by plasma_cpotrf. * * @param[in,out] B * On entry, the n-by-nrhs right hand side matrix B. * On exit, if return value = 0, the n-by-nrhs solution matrix X. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). Check * the sequence->status for errors. * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. * ******************************************************************************* * * @sa plasma_cpbtrs * @sa plasma_omp_cpbtrs * @sa plasma_omp_cpbtrs * @sa plasma_omp_dpbtrs * @sa plasma_omp_spbtrs * @sa plasma_omp_cpbtrf * ******************************************************************************/ void plasma_omp_cpbtrs(plasma_enum_t uplo, plasma_desc_t AB, plasma_desc_t B, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. 
if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(AB) != PlasmaSuccess) { plasma_error("invalid A"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(B) != PlasmaSuccess) { plasma_error("invalid B"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_fatal_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_fatal_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (AB.n == 0 || B.n == 0) return; // Call the parallel functions. plasma_pctbsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans, PlasmaNonUnit, 1.0, AB, B, NULL, sequence, request); plasma_pctbsm(PlasmaLeft, uplo, uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans, PlasmaNonUnit, 1.0, AB, B, NULL, sequence, request); }
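/*
 * Usage sketch: the banded Cholesky solve above amounts to a factorization
 * followed by two triangular band solves with the factor (U^H then U for
 * upper storage, L then L^H for lower).  A minimal driver might look like the
 * following; plasma_init(), plasma_finalize() and the plasma_cpbtrf()
 * signature are assumed from the usual PLASMA conventions and are not defined
 * in this file.
 */
#include "plasma.h"

int solve_band_system(int n, int kd, int nrhs,
                      plasma_complex32_t *AB, int ldab, /* band storage, ldab >= kd+1 */
                      plasma_complex32_t *B,  int ldb)  /* right-hand sides, ldb >= n */
{
    plasma_init();                                          /* assumed entry point */

    /* Cholesky factorization of the band matrix: A = U^H*U (upper storage). */
    int info = plasma_cpbtrf(PlasmaUpper, n, kd, AB, ldab); /* assumed signature */
    if (info != PlasmaSuccess) {
        plasma_finalize();
        return info;
    }

    /* Solve A*X = B via U^H*Y = B followed by U*X = Y. */
    info = plasma_cpbtrs(PlasmaUpper, n, kd, nrhs, AB, ldab, B, ldb);

    plasma_finalize();
    return info;
}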
core_zgeqrt.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> c d s * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "plasma_internal.h" #include "core_lapack.h" #include <omp.h> /***************************************************************************//** * * @ingroup core_geqrt * * Computes a QR factorization of an m-by-n tile A: * The factorization has the form * \f[ * A = Q \times R * \f] * The tile Q is represented as a product of elementary reflectors * \f[ * Q = H(1) H(2) ... H(k), * \f] * where \f$ k = min(m,n) \f$. * * Each \f$ H(i) \f$ has the form * \f[ * H(i) = I - \tau \times v \times v^H * \f] * where \f$ tau \f$ is a scalar, and \f$ v \f$ is a vector with * v(1:i-1) = 0 and v(i) = 1; v(i+1:m) is stored on exit in A(i+1:m,i), * and \f$ tau \f$ in tau(i). * ******************************************************************************* * * @param[in] m * The number of rows of the tile A. m >= 0. * * @param[in] n * The number of columns of the tile A. n >= 0. * * @param[in] ib * The inner-blocking size. ib >= 0. * * @param[in,out] A * On entry, the m-by-n tile A. * On exit, the elements on and above the diagonal of the array * contain the min(m,n)-by-n upper trapezoidal tile R (R is * upper triangular if m >= n); the elements below the diagonal, * with the array tau, represent the unitary tile Q as a * product of elementary reflectors (see Further Details). * * @param[in] lda * The leading dimension of the array A. lda >= max(1,m). * * @param[out] T * The ib-by-n triangular factor T of the block reflector. * T is upper triangular by block (economic storage); * The rest of the array is not referenced. * * @param[in] ldt * The leading dimension of the array T. ldt >= ib. * * @param tau * Auxiliary workspace array of length n. * * @param work * Auxiliary workspace array of length ib*n. * * @param[in] lwork * Size of the array work. Should be at least ib*n. * ******************************************************************************* * * @retval PlasmaSuccess successful exit * @retval < 0 if -i, the i-th argument had an illegal value * ******************************************************************************/ __attribute__((weak)) int plasma_core_zgeqrt(int m, int n, int ib, plasma_complex64_t *A, int lda, plasma_complex64_t *T, int ldt, plasma_complex64_t *tau, plasma_complex64_t *work) { // Check input arguments. 
if (m < 0) { plasma_coreblas_error("illegal value of m"); return -1; } if (n < 0) { plasma_coreblas_error("illegal value of n"); return -2; } if ((ib < 0) || ( (ib == 0) && (m > 0) && (n > 0) )) { plasma_coreblas_error("illegal value of ib"); return -3; } if (A == NULL) { plasma_coreblas_error("NULL A"); return -4; } if (lda < imax(1, m) && m > 0) { plasma_coreblas_error("illegal value of lda"); return -5; } if (T == NULL) { plasma_coreblas_error("NULL T"); return -6; } if (ldt < imax(1, ib) && ib > 0) { plasma_coreblas_error("illegal value of ldt"); return -7; } if (tau == NULL) { plasma_coreblas_error("NULL tau"); return -8; } if (work == NULL) { plasma_coreblas_error("NULL work"); return -9; } // quick return if (m == 0 || n == 0 || ib == 0) return PlasmaSuccess; int k = imin(m, n); for (int i = 0; i < k; i += ib) { int sb = imin(ib, k-i); LAPACKE_zgeqr2_work(LAPACK_COL_MAJOR, m-i, sb, &A[lda*i+i], lda, &tau[i], work); LAPACKE_zlarft_work(LAPACK_COL_MAJOR, lapack_const(PlasmaForward), lapack_const(PlasmaColumnwise), m-i, sb, &A[lda*i+i], lda, &tau[i], &T[ldt*i], ldt); if (n > i+sb) { LAPACKE_zlarfb_work(LAPACK_COL_MAJOR, lapack_const(PlasmaLeft), lapack_const(Plasma_ConjTrans), lapack_const(PlasmaForward), lapack_const(PlasmaColumnwise), m-i, n-i-sb, sb, &A[lda*i+i], lda, &T[ldt*i], ldt, &A[lda*(i+sb)+i], lda, work, n-i-sb); } } return PlasmaSuccess; } /******************************************************************************/ void plasma_core_omp_zgeqrt(int m, int n, int ib, plasma_complex64_t *A, int lda, plasma_complex64_t *T, int ldt, plasma_workspace_t work, plasma_sequence_t *sequence, plasma_request_t *request) { #pragma omp task depend(inout:A[0:lda*n]) \ depend(out:T[0:ib*n]) { if (sequence->status == PlasmaSuccess) { // Prepare workspaces. int tid = omp_get_thread_num(); plasma_complex64_t *tau = ((plasma_complex64_t*)work.spaces[tid]); // Call the kernel. int info = plasma_core_zgeqrt(m, n, ib, A, lda, T, ldt, tau, tau+n); if (info != PlasmaSuccess) { plasma_error("core_zgeqrt() failed"); plasma_request_fail(sequence, request, PlasmaErrorInternal); } } } }
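/*
 * Usage sketch: plasma_core_omp_zgeqrt() above hands the kernel a single
 * per-thread workspace laid out as tau (n entries) immediately followed by
 * work (ib*n entries).  The helper below reproduces that layout when calling
 * the sequential kernel directly on one tile; the caller supplies the tile,
 * the T factor and the sizes.
 */
#include <stdlib.h>
#include <plasma_core_blas.h>

int qr_one_tile(int m, int n, int ib,
                plasma_complex64_t *A, int lda, /* m-by-n tile: R and reflectors on exit */
                plasma_complex64_t *T, int ldt) /* ib-by-n block-reflector factor, ldt >= ib */
{
    /* tau (n entries) followed by work (ib*n entries), as in the OpenMP task above. */
    plasma_complex64_t *tau =
        malloc((size_t)(n + ib*n) * sizeof(plasma_complex64_t));
    if (tau == NULL)
        return -1;   /* out of memory */

    int info = plasma_core_zgeqrt(m, n, ib, A, lda, T, ldt, tau, tau + n);

    free(tau);
    return info;
}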
update_ops_control_multi_target_single.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include "constant.h" #include "update_ops.h" #include "utility.h" #ifdef _OPENMP #include <omp.h> #endif #ifdef _MSC_VER #include <intrin.h> #else #include <x86intrin.h> #endif void create_shift_mask_list_from_list_and_value_buf(const UINT* array, UINT count, UINT target, UINT* dst_array, ITYPE* dst_mask); //void multi_qubit_control_single_qubit_dense_matrix_gate_old_single(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim); //void multi_qubit_control_single_qubit_dense_matrix_gate_old_parallel(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim); //void multi_qubit_control_single_qubit_dense_matrix_gate_single(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim); //void multi_qubit_control_single_qubit_dense_matrix_gate_single_stack(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim); //ITYPE* create_shift_mask_list(const UINT* array, UINT count, UINT target); void create_shift_mask_list_from_list_and_value_buf(const UINT* array, UINT count, UINT target, UINT* dst_array, ITYPE* dst_mask) { UINT size = count + 1; memcpy(dst_array, array, sizeof(UINT)*count); dst_array[count] = target; sort_ui(dst_array, size); for (UINT i = 0; i < size; ++i) { dst_mask[i] = (1UL << dst_array[i]) - 1; } } void multi_qubit_control_single_qubit_dense_matrix_gate(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { //multi_qubit_control_single_qubit_dense_matrix_gate_old_single(control_qubit_index_list, control_value_list, control_qubit_index_count, target_qubit_index, matrix, state, dim); //multi_qubit_control_single_qubit_dense_matrix_gate_old_parallel(control_qubit_index_list, control_value_list, control_qubit_index_count, target_qubit_index, matrix, state, dim); //multi_qubit_control_single_qubit_dense_matrix_gate_single(control_qubit_index_list, control_value_list, control_qubit_index_count, target_qubit_index, matrix, state, dim); //multi_qubit_control_single_qubit_dense_matrix_gate_single_stack(control_qubit_index_list, control_value_list, control_qubit_index_count,target_qubit_index,matrix,state, dim); //multi_qubit_control_single_qubit_dense_matrix_gate_single_unroll(control_qubit_index_list, control_value_list, control_qubit_index_count,target_qubit_index,matrix,state, dim); //multi_qubit_control_single_qubit_dense_matrix_gate_single_simd(control_qubit_index_list, control_value_list, control_qubit_index_count,target_qubit_index,matrix,state, dim); //multi_qubit_control_single_qubit_dense_matrix_gate_parallel_simd(control_qubit_index_list, control_value_list, control_qubit_index_count,target_qubit_index,matrix,state, dim); if (control_qubit_index_count == 1) { single_qubit_control_single_qubit_dense_matrix_gate(control_qubit_index_list[0], control_value_list[0], target_qubit_index, matrix, state, dim); return; } #ifdef _USE_SIMD #ifdef _OPENMP UINT threshold = 13; if (dim < (((ITYPE)1) << threshold)) { 
multi_qubit_control_single_qubit_dense_matrix_gate_single_simd(control_qubit_index_list, control_value_list, control_qubit_index_count,target_qubit_index,matrix,state, dim); } else { multi_qubit_control_single_qubit_dense_matrix_gate_parallel_simd(control_qubit_index_list, control_value_list, control_qubit_index_count,target_qubit_index,matrix,state, dim); } #else multi_qubit_control_single_qubit_dense_matrix_gate_single_simd(control_qubit_index_list, control_value_list, control_qubit_index_count, target_qubit_index, matrix, state, dim); #endif #else #ifdef _OPENMP UINT threshold = 13; if (dim < (((ITYPE)1) << threshold)) { multi_qubit_control_single_qubit_dense_matrix_gate_single_unroll(control_qubit_index_list, control_value_list, control_qubit_index_count, target_qubit_index, matrix, state, dim); } else { multi_qubit_control_single_qubit_dense_matrix_gate_parallel_unroll(control_qubit_index_list, control_value_list, control_qubit_index_count, target_qubit_index, matrix, state, dim); } #else multi_qubit_control_single_qubit_dense_matrix_gate_single_unroll(control_qubit_index_list, control_value_list, control_qubit_index_count, target_qubit_index, matrix, state, dim); #endif #endif } void multi_qubit_control_single_qubit_dense_matrix_gate_single_unroll(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { UINT sort_array[64]; ITYPE mask_array[64]; create_shift_mask_list_from_list_and_value_buf(control_qubit_index_list, control_qubit_index_count, target_qubit_index, sort_array, mask_array); const ITYPE target_mask = 1ULL << target_qubit_index; ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); const UINT insert_index_list_count = control_qubit_index_count + 1; const ITYPE loop_dim = dim >> insert_index_list_count; if (target_qubit_index == 0) { ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_0+1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_0+1] = matrix[2] * cval0 + matrix[3] * cval1; } } else if (sort_array[0] == 0) { ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_1] = matrix[2] * cval0 + matrix[3] * cval1; } } else { ITYPE state_index; for (state_index = 0; state_index < loop_dim; state_index+=2) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_1]; CTYPE cval2 = state[basis_0+1]; CTYPE cval3 = 
state[basis_1+1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_1] = matrix[2] * cval0 + matrix[3] * cval1; state[basis_0+1] = matrix[0] * cval2 + matrix[1] * cval3; state[basis_1+1] = matrix[2] * cval2 + matrix[3] * cval3; } } } #ifdef _OPENMP void multi_qubit_control_single_qubit_dense_matrix_gate_parallel_unroll(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { UINT sort_array[64]; ITYPE mask_array[64]; create_shift_mask_list_from_list_and_value_buf(control_qubit_index_list, control_qubit_index_count, target_qubit_index, sort_array, mask_array); const ITYPE target_mask = 1ULL << target_qubit_index; ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); const UINT insert_index_list_count = control_qubit_index_count + 1; const ITYPE loop_dim = dim >> insert_index_list_count; if (target_qubit_index == 0) { ITYPE state_index; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_0 + 1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_0 + 1] = matrix[2] * cval0 + matrix[3] * cval1; } } else if (sort_array[0] == 0) { ITYPE state_index; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_1] = matrix[2] * cval0 + matrix[3] * cval1; } } else { ITYPE state_index; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; state_index += 2) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_1]; CTYPE cval2 = state[basis_0 + 1]; CTYPE cval3 = state[basis_1 + 1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_1] = matrix[2] * cval0 + matrix[3] * cval1; state[basis_0 + 1] = matrix[0] * cval2 + matrix[1] * cval3; state[basis_1 + 1] = matrix[2] * cval2 + matrix[3] * cval3; } } } #endif #ifdef _USE_SIMD void multi_qubit_control_single_qubit_dense_matrix_gate_single_simd(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { UINT sort_array[64]; ITYPE mask_array[64]; create_shift_mask_list_from_list_and_value_buf(control_qubit_index_list, control_qubit_index_count, target_qubit_index, sort_array, mask_array); const ITYPE target_mask = 1ULL << target_qubit_index; ITYPE control_mask = 
create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); const UINT insert_index_list_count = control_qubit_index_count + 1; const ITYPE loop_dim = dim >> insert_index_list_count; if (target_qubit_index == 0) { __m256d mv00 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[0]), creal(matrix[0])); __m256d mv01 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[0]), cimag(matrix[0])); __m256d mv20 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[2]), creal(matrix[2])); __m256d mv21 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[2]), cimag(matrix[2])); ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; double* ptr = (double*)(state + basis_0); __m256d data = _mm256_loadu_pd(ptr); __m256d data_u0 = _mm256_mul_pd(data, mv00); __m256d data_u1 = _mm256_mul_pd(data, mv01); __m256d data_u2 = _mm256_hadd_pd(data_u0, data_u1); data_u2 = _mm256_permute4x64_pd(data_u2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 __m256d data_d0 = _mm256_mul_pd(data, mv20); __m256d data_d1 = _mm256_mul_pd(data, mv21); __m256d data_d2 = _mm256_hadd_pd(data_d0, data_d1); data_d2 = _mm256_permute4x64_pd(data_d2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 __m256d data_r = _mm256_hadd_pd(data_u2, data_d2); data_r = _mm256_permute4x64_pd(data_r, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 _mm256_storeu_pd(ptr, data_r); } } else if (sort_array[0] == 0) { ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_1] = matrix[2] * cval0 + matrix[3] * cval1; } } else { __m256d mv00 = _mm256_set_pd(-cimag(matrix[0]), creal(matrix[0]), -cimag(matrix[0]), creal(matrix[0])); __m256d mv01 = _mm256_set_pd(creal(matrix[0]), cimag(matrix[0]), creal(matrix[0]), cimag(matrix[0])); __m256d mv10 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[1]), creal(matrix[1])); __m256d mv11 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[1]), cimag(matrix[1])); __m256d mv20 = _mm256_set_pd(-cimag(matrix[2]), creal(matrix[2]), -cimag(matrix[2]), creal(matrix[2])); __m256d mv21 = _mm256_set_pd(creal(matrix[2]), cimag(matrix[2]), creal(matrix[2]), cimag(matrix[2])); __m256d mv30 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[3]), creal(matrix[3])); __m256d mv31 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[3]), cimag(matrix[3])); ITYPE state_index; for (state_index = 0; state_index < loop_dim; state_index += 2) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; double* ptr0 = (double*)(state + basis_0); double* ptr1 = 
(double*)(state + basis_1); __m256d data0 = _mm256_loadu_pd(ptr0); __m256d data1 = _mm256_loadu_pd(ptr1); __m256d data_u2 = _mm256_mul_pd(data0, mv00); __m256d data_u3 = _mm256_mul_pd(data1, mv10); __m256d data_u4 = _mm256_mul_pd(data0, mv01); __m256d data_u5 = _mm256_mul_pd(data1, mv11); __m256d data_u6 = _mm256_hadd_pd(data_u2, data_u4); __m256d data_u7 = _mm256_hadd_pd(data_u3, data_u5); __m256d data_d2 = _mm256_mul_pd(data0, mv20); __m256d data_d3 = _mm256_mul_pd(data1, mv30); __m256d data_d4 = _mm256_mul_pd(data0, mv21); __m256d data_d5 = _mm256_mul_pd(data1, mv31); __m256d data_d6 = _mm256_hadd_pd(data_d2, data_d4); __m256d data_d7 = _mm256_hadd_pd(data_d3, data_d5); __m256d data_r0 = _mm256_add_pd(data_u6, data_u7); __m256d data_r1 = _mm256_add_pd(data_d6, data_d7); _mm256_storeu_pd(ptr0, data_r0); _mm256_storeu_pd(ptr1, data_r1); } } } #ifdef _OPENMP void multi_qubit_control_single_qubit_dense_matrix_gate_parallel_simd(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { UINT sort_array[64]; ITYPE mask_array[64]; create_shift_mask_list_from_list_and_value_buf(control_qubit_index_list, control_qubit_index_count, target_qubit_index, sort_array, mask_array); const ITYPE target_mask = 1ULL << target_qubit_index; ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); const UINT insert_index_list_count = control_qubit_index_count + 1; const ITYPE loop_dim = dim >> insert_index_list_count; if (target_qubit_index == 0) { __m256d mv00 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[0]), creal(matrix[0])); __m256d mv01 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[0]), cimag(matrix[0])); __m256d mv20 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[2]), creal(matrix[2])); __m256d mv21 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[2]), cimag(matrix[2])); ITYPE state_index; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; double* ptr = (double*)(state + basis_0); __m256d data = _mm256_loadu_pd(ptr); __m256d data_u0 = _mm256_mul_pd(data, mv00); __m256d data_u1 = _mm256_mul_pd(data, mv01); __m256d data_u2 = _mm256_hadd_pd(data_u0, data_u1); data_u2 = _mm256_permute4x64_pd(data_u2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 __m256d data_d0 = _mm256_mul_pd(data, mv20); __m256d data_d1 = _mm256_mul_pd(data, mv21); __m256d data_d2 = _mm256_hadd_pd(data_d0, data_d1); data_d2 = _mm256_permute4x64_pd(data_d2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 __m256d data_r = _mm256_hadd_pd(data_u2, data_d2); data_r = _mm256_permute4x64_pd(data_r, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 _mm256_storeu_pd(ptr, data_r); } } else if (sort_array[0] == 0) { ITYPE state_index; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; // fetch values CTYPE 
cval0 = state[basis_0]; CTYPE cval1 = state[basis_1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_1] = matrix[2] * cval0 + matrix[3] * cval1; } } else { __m256d mv00 = _mm256_set_pd(-cimag(matrix[0]), creal(matrix[0]), -cimag(matrix[0]), creal(matrix[0])); __m256d mv01 = _mm256_set_pd(creal(matrix[0]), cimag(matrix[0]), creal(matrix[0]), cimag(matrix[0])); __m256d mv10 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[1]), creal(matrix[1])); __m256d mv11 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[1]), cimag(matrix[1])); __m256d mv20 = _mm256_set_pd(-cimag(matrix[2]), creal(matrix[2]), -cimag(matrix[2]), creal(matrix[2])); __m256d mv21 = _mm256_set_pd(creal(matrix[2]), cimag(matrix[2]), creal(matrix[2]), cimag(matrix[2])); __m256d mv30 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[3]), creal(matrix[3])); __m256d mv31 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[3]), cimag(matrix[3])); ITYPE state_index; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; state_index += 2) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; double* ptr0 = (double*)(state + basis_0); double* ptr1 = (double*)(state + basis_1); __m256d data0 = _mm256_loadu_pd(ptr0); __m256d data1 = _mm256_loadu_pd(ptr1); __m256d data_u2 = _mm256_mul_pd(data0, mv00); __m256d data_u3 = _mm256_mul_pd(data1, mv10); __m256d data_u4 = _mm256_mul_pd(data0, mv01); __m256d data_u5 = _mm256_mul_pd(data1, mv11); __m256d data_u6 = _mm256_hadd_pd(data_u2, data_u4); __m256d data_u7 = _mm256_hadd_pd(data_u3, data_u5); __m256d data_d2 = _mm256_mul_pd(data0, mv20); __m256d data_d3 = _mm256_mul_pd(data1, mv30); __m256d data_d4 = _mm256_mul_pd(data0, mv21); __m256d data_d5 = _mm256_mul_pd(data1, mv31); __m256d data_d6 = _mm256_hadd_pd(data_d2, data_d4); __m256d data_d7 = _mm256_hadd_pd(data_d3, data_d5); __m256d data_r0 = _mm256_add_pd(data_u6, data_u7); __m256d data_r1 = _mm256_add_pd(data_d6, data_d7); _mm256_storeu_pd(ptr0, data_r0); _mm256_storeu_pd(ptr1, data_r1); } } } #endif #endif /* void multi_qubit_control_single_qubit_dense_matrix_gate_old_single(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { // insert index list const UINT insert_index_list_count = control_qubit_index_count + 1; UINT* insert_index_list = create_sorted_ui_list_value(control_qubit_index_list, control_qubit_index_count, target_qubit_index); // target mask const ITYPE target_mask = 1ULL << target_qubit_index; // control mask ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); // loop variables const ITYPE loop_dim = dim >> insert_index_list_count; ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_c_t0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_c_t0 = insert_zero_to_basis_index(basis_c_t0, 1ULL << insert_index_list[cursor], insert_index_list[cursor]); } // flip controls basis_c_t0 ^= control_mask; // gather target ITYPE basis_c_t1 = basis_c_t0 ^ target_mask; // fetch values CTYPE cval_c_t0 = state[basis_c_t0]; CTYPE cval_c_t1 
= state[basis_c_t1]; // set values state[basis_c_t0] = matrix[0] * cval_c_t0 + matrix[1] * cval_c_t1; state[basis_c_t1] = matrix[2] * cval_c_t0 + matrix[3] * cval_c_t1; } free(insert_index_list); } #ifdef _OPENMP void multi_qubit_control_single_qubit_dense_matrix_gate_old_parallel(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { // insert index list const UINT insert_index_list_count = control_qubit_index_count + 1; UINT* insert_index_list = create_sorted_ui_list_value(control_qubit_index_list, control_qubit_index_count, target_qubit_index); // target mask const ITYPE target_mask = 1ULL << target_qubit_index; // control mask ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); // loop variables const ITYPE loop_dim = dim >> insert_index_list_count; ITYPE state_index; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_c_t0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_c_t0 = insert_zero_to_basis_index(basis_c_t0, 1ULL << insert_index_list[cursor], insert_index_list[cursor]); } // flip controls basis_c_t0 ^= control_mask; // gather target ITYPE basis_c_t1 = basis_c_t0 ^ target_mask; // fetch values CTYPE cval_c_t0 = state[basis_c_t0]; CTYPE cval_c_t1 = state[basis_c_t1]; // set values state[basis_c_t0] = matrix[0] * cval_c_t0 + matrix[1] * cval_c_t1; state[basis_c_t1] = matrix[2] * cval_c_t0 + matrix[3] * cval_c_t1; } free(insert_index_list); } #endif ITYPE* create_shift_mask_list(const UINT* array, UINT count, UINT target) { UINT size = count + 1; UINT* new_array = (UINT*)malloc(sizeof(UINT)*size); memcpy(new_array, array, sizeof(UINT)*count); new_array[count] = target; sort_ui(new_array, size); ITYPE* mask_array = (ITYPE*)malloc(sizeof(ITYPE)*size); for (UINT i = 0; i < size; ++i) { mask_array[i] = (1UL << new_array[i]) - 1; } free(new_array); return mask_array; } void multi_qubit_control_single_qubit_dense_matrix_gate_single(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { ITYPE* shift_masks = create_shift_mask_list(control_qubit_index_list, control_qubit_index_count, target_qubit_index); const ITYPE target_mask = 1ULL << target_qubit_index; ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); const UINT insert_index_list_count = control_qubit_index_count + 1; const ITYPE loop_dim = dim >> insert_index_list_count; ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & shift_masks[cursor]) + ((basis_0 & (~shift_masks[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 ^ target_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_1] = matrix[2] * cval0 + matrix[3] * cval1; } free(shift_masks); } void multi_qubit_control_single_qubit_dense_matrix_gate_single_stack(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE 
*state, ITYPE dim) { UINT sort_array[64]; ITYPE mask_array[64]; create_shift_mask_list_from_list_and_value_buf(control_qubit_index_list, control_qubit_index_count, target_qubit_index, sort_array, mask_array); const ITYPE target_mask = 1ULL << target_qubit_index; ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); const UINT insert_index_list_count = control_qubit_index_count + 1; const ITYPE loop_dim = dim >> insert_index_list_count; ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_1] = matrix[2] * cval0 + matrix[3] * cval1; } } */
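/*
 * Self-contained sketch of the index arithmetic shared by the kernels above:
 * the masks produced by create_shift_mask_list_from_list_and_value_buf() keep
 * the bits below each sorted control/target position, and
 * (b & mask) + ((b & ~mask) << 1) opens a zero bit at that position.  Adding
 * control_mask then forces the control qubits to their required values.
 * Plain unsigned long long stands in for UINT/ITYPE; the qubit positions are
 * illustrative (one control on qubit 1 with value 1, target on qubit 3).
 */
#include <stdio.h>

int basis_expansion_demo(void)
{
    unsigned long long mask[2] = { (1ULL << 1) - 1, (1ULL << 3) - 1 };
    unsigned long long control_mask = 1ULL << 1;   /* control qubit 1 must be 1 */
    unsigned long long target_mask  = 1ULL << 3;   /* target qubit 3            */

    for (unsigned long long idx = 0; idx < 4; ++idx) {    /* dim >> 2 iterations */
        unsigned long long basis_0 = idx;
        /* insert a zero bit at each sorted position, lowest first */
        for (int c = 0; c < 2; ++c)
            basis_0 = (basis_0 & mask[c]) + ((basis_0 & ~mask[c]) << 1);
        basis_0 += control_mask;                           /* set the control bit */
        unsigned long long basis_1 = basis_0 + target_mask;
        printf("idx %llu -> |t=0> index %llu, |t=1> index %llu\n",
               idx, basis_0, basis_1);
    }
    return 0;
}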
GB_binop__ge_bool.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ge_bool) // A.*B function (eWiseMult): GB (_AemultB_08__ge_bool) // A.*B function (eWiseMult): GB (_AemultB_02__ge_bool) // A.*B function (eWiseMult): GB (_AemultB_04__ge_bool) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_bool) // A*D function (colscale): GB (_AxD__ge_bool) // D*A function (rowscale): GB (_DxB__ge_bool) // C+=B function (dense accum): GB (_Cdense_accumB__ge_bool) // C+=b function (dense accum): GB (_Cdense_accumb__ge_bool) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_bool) // C=scalar+B GB (_bind1st__ge_bool) // C=scalar+B' GB (_bind1st_tran__ge_bool) // C=A+scalar GB (_bind2nd__ge_bool) // C=A'+scalar GB (_bind2nd_tran__ge_bool) // C type: bool // A type: bool // A pattern? 0 // B type: bool // B pattern? 0 // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ bool #define GB_BTYPE \ bool #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ bool aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ bool bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_BOOL || GxB_NO_GE_BOOL) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__ge_bool) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ge_bool) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ge_bool) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type bool bool bwork = (*((bool *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ge_bool) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ge_bool) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ge_bool) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; bool alpha_scalar ; bool beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((bool *) alpha_scalar_in)) ; beta_scalar = (*((bool *) beta_scalar_in )) ; } #include 
"GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ge_bool) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ge_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ge_bool) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ge_bool) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ge_bool) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; bool x = (*((bool *) x_input)) ; bool *Bx = (bool *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; bool bij = GBX (Bx, p, false) ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ge_bool) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; bool aij = GBX (Ax, p, false) ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB (_bind1st_tran__ge_bool) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // 
the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ bool #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool x = (*((const bool *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ bool } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB (_bind2nd_tran__ge_bool) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool y = (*((const bool *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
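/*
 * Standalone sketch of GB (_bind2nd__ge_bool) with the macros expanded:
 * Cx [p] = (Ax [p] >= y) for every entry present in the bitmap.  The int8_t
 * bitmap convention (nonzero = entry present, NULL = all present) follows the
 * Ab argument above; everything else is plain C.
 */
#include <stdbool.h>
#include <stdint.h>

void ge_bool_bind2nd_sketch(bool *Cx, const bool *Ax, bool y,
                            const int8_t *Ab, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   /* entry not present in the bitmap */
        Cx [p] = (Ax [p] >= y) ;                /* the GE_BOOL operator            */
    }
}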
eval_cf.c
/******************************************************************************* * 2pt/eval_cf.c: this file is part of the FCFC program. * FCFC: Fast Correlation Function Calculator. * Github repository: https://github.com/cheng-zhao/FCFC * Copyright (c) 2020 -- 2021 Cheng Zhao <[email protected]> [MIT license] *******************************************************************************/ #include "eval_cf.h" #include "count_func.h" #include "read_file.h" #include "read_res.h" #include "save_res.h" #include "legpoly.h" #include "build_tree.h" #include <stdlib.h> #ifdef OMP #include <omp.h> #endif /*============================================================================*\ Macros for error handling \*============================================================================*/ #define CLEAN_TREE \ for (int ii = 0; ii < cf->ncat; ii++) \ tree_destroy(tree[ii], FCFC_TREE_TYPE_KDTREE); \ free(tree); /*============================================================================*\ Functions for steps of correlation function evaluations \*============================================================================*/ /****************************************************************************** Function `eval_pairs`: Evaluate pair counts for the input catalogs. Arguments: * `conf`: structure for storing configurations; * `cf`: structure for correlation function evaluations. Return: Zero on success; non-zero on error. ******************************************************************************/ static int eval_pairs(const CONF *conf, CF *cf) { /* Allocate memory for trees. */ void **tree; if (!(tree = malloc(sizeof(void *) * cf->ncat))) { P_ERR("failed to allocate memory for trees\n"); return FCFC_ERR_MEMORY; } for (int i = 0; i < cf->ncat; i++) tree[i] = NULL; int ny; if (cf->bintype == FCFC_BIN_SPI) ny = cf->np; else ny = cf->nmu; /* cf->bintype == FCFC_BIN_ISO or FCFC_BIN_SMU */ for (int i = 0; i < cf->npc; i++) { /* Read pair counts from file if applicable. */ if (!cf->comp_pc[i]) { printf("Reading %s pairs ...", conf->pc[i]); if (conf->verbose) printf("\n Filename: %s\n", conf->pcout[i]); fflush(stdout); int e = read_pair_count(conf->pcout[i], cf->ns, ny, cf->ncnt[i]); if (e) { CLEAN_TREE; return e; } printf(FMT_DONE); continue; } int cat[2]; cat[0] = cf->pc_idx[0][i]; cat[1] = cf->pc_idx[1][i]; bool usewt = cf->wt[cat[0]] || cf->wt[cat[1]]; /* Read catalogue and construct the tree if necessary. */ for (int j = 0; j < 2; j++) { int idx = cat[j]; if (!tree[idx]) tree[idx] = tree_create(conf, cf, idx, FCFC_TREE_TYPE_KDTREE); if (!tree[idx]) { CLEAN_TREE; return FCFC_ERR_TREE; } } /* Count pairs. */ printf("Counting %c%c pairs ...", cf->label[cat[0]], cf->label[cat[1]]); if (conf->verbose) printf("\n"); fflush(stdout); if (cat[0] == cat[1]) { /* auto pairs */ count_pairs(tree[cat[0]], tree[cat[0]], cf, cf->cnt[i], true, usewt); /* Double auto pairs. */ if (usewt) { for (size_t k = 0; k < cf->ntot; k++) cf->cnt[i][k].d *= 2; cf->norm[i] = cf->wdata[cat[0]] * (cf->wdata[cat[0]] - 1); } else { for (size_t k = 0; k < cf->ntot; k++) cf->cnt[i][k].i *= 2; cf->norm[i] = (double) cf->ndata[cat[0]] * (cf->ndata[cat[0]] - 1); } } else { /* cross counts */ count_pairs(tree[cat[0]], tree[cat[1]], cf, cf->cnt[i], false, usewt); if (usewt) cf->norm[i] = cf->wdata[cat[0]] * cf->wdata[cat[1]]; else cf->norm[i] = (double) cf->ndata[cat[0]] * cf->ndata[cat[1]]; } /* Normalise pair counts. 
*/ if (usewt) { for (size_t k = 0; k < cf->ntot; k++) cf->ncnt[i][k] = cf->cnt[i][k].d / cf->norm[i]; } else { for (size_t k = 0; k < cf->ntot; k++) cf->ncnt[i][k] = cf->cnt[i][k].i / cf->norm[i]; } /* Save pair counts. */ int e = save_res(conf, cf, i, FCFC_OUTPUT_PAIR_COUNT); if (e) { CLEAN_TREE; return e; } /* Release memory if necessary. */ for (int j = 0; j < 2; j++) { bool free_data = true; for (int k = i + 1; k < cf->npc; k++) { if (cat[j] == cf->pc_idx[0][k] || cat[j] == cf->pc_idx[1][k]) { free_data = false; break; } } if (free_data) { free(cf->data[cat[j]]); cf->data[cat[j]] = NULL; tree_destroy(tree[cat[j]], FCFC_TREE_TYPE_KDTREE); tree[cat[j]] = NULL; } } printf(FMT_DONE); } CLEAN_TREE; return 0; } /****************************************************************************** Function `eval_cf_exp`: Evaluate correlation functions given the expressions for estimators. Arguments: * `conf`: structure for storing configurations; * `cf`: structure for correlation function evaluations. Return: Zero on success; non-zero on error. ******************************************************************************/ static int eval_cf_exp(const CONF *conf, CF *cf) { printf("Evaluate correlation function estimators ..."); if (conf->verbose) printf("\n"); fflush(stdout); /* Prepare the array of normalised pair counts, for libast evaluations. */ #ifdef OMP double *pc = calloc((size_t) cf->nthread * cf->npc, sizeof(double)); #else double *pc = calloc(cf->npc, sizeof(double)); #endif if (!pc) { P_ERR("failed to allocate memory for 2PCF evaluation\n"); return FCFC_ERR_MEMORY; } for (int i = 0; i < cf->ncf; i++) { #ifdef OMP #pragma omp parallel num_threads(cf->nthread) firstprivate(pc) { const int tid = omp_get_thread_num(); pc += (size_t) tid * cf->npc; #pragma omp for #endif for (size_t j = 0; j < cf->ntot; j++) { /* Set pair counts to be passed to libast. */ for (int k = 0; k < cf->npc; k++) pc[k] = cf->ncnt[k][j]; /* Evaluate the 2PCF. */ if (ast_eval_num(cf->ast_cf[i], cf->cf[i] + j, pc, cf->npc)) { ast_perror(cf->ast_cf[i], stderr, FMT_ERR " failed to evaluate 2PCF:"); #ifdef OMP exit(FCFC_ERR_AST); #else free(pc); return FCFC_ERR_AST; #endif } } #ifdef OMP } #endif /* Save the correlation function. */ int e = save_res(conf, cf, i, FCFC_OUTPUT_2PCF_RAW); if (e) { free(pc); return e; } } free(pc); printf(FMT_DONE); return 0; } /****************************************************************************** Function `eval_cf_mp`: Evaluate correlation function multipoles given xi(s,mu). Arguments: * `conf`: structure for storing configurations; * `cf`: structure for correlation function evaluations. Return: Zero on success; non-zero on error. ******************************************************************************/ static int eval_cf_mp(const CONF *conf, CF *cf) { printf("Compute correlation function multipoles ..."); if (conf->verbose) printf("\n"); fflush(stdout); for (int i = 0; i < cf->ncf; i++) { for (int l = 0; l < cf->nl; l++) { int ell = cf->poles[l]; double fac = (2 * ell + 1) / (double) cf->nmu; for (int j = 0; j < cf->nmu; j++) { double mu = (j + 0.5) / (double) cf->nmu; for (int k = 0; k < cf->ns; k++) { cf->mp[i][k + l * cf->ns] += cf->cf[i][k + j * cf->ns] * fac * legpoly(ell, mu); } } } /* Save the correlation function multipoles. 
*/ int e = save_res(conf, cf, i, FCFC_OUTPUT_2PCF_INTEG); if (e) return e; } printf(FMT_DONE); return 0; } /****************************************************************************** Function `eval_cf_wp`: Evaluate projected correlation functions given xi(s_perp,pi). Arguments: * `conf`: structure for storing configurations; * `cf`: structure for correlation function evaluations. Return: Zero on success; non-zero on error. ******************************************************************************/ static int eval_cf_wp(const CONF *conf, CF *cf) { printf("Compute projected correlation functions ..."); if (conf->verbose) printf("\n"); fflush(stdout); for (int i = 0; i < cf->ncf; i++) { for (int j = 0; j < cf->np; j++) { double dpi = cf->pbin[j + 1] - cf->pbin[j]; for (int k = 0; k < cf->ns; k++) { cf->wp[i][k] += 2 * cf->cf[i][k + j * cf->ns] * dpi; } } /* Save the projected correlation functions. */ int e = save_res(conf, cf, i, FCFC_OUTPUT_2PCF_INTEG); if (e) return e; } printf(FMT_DONE); return 0; } /*============================================================================*\ Interface for correlation function evaluations \*============================================================================*/ /****************************************************************************** Function `eval_cf`: Evaluate correlation functions. Arguments: * `conf`: structure for storing configurations; * `cf`: structure for correlation function evaluations. Return: Zero on success; non-zero on error. ******************************************************************************/ int eval_cf(const CONF *conf, CF *cf) { int e; if ((e = eval_pairs(conf, cf))) return e; if (cf->ncf) if ((e = eval_cf_exp(conf, cf))) return e; if (cf->nl) if ((e = eval_cf_mp(conf, cf))) return e; if (cf->comp_wp) if ((e = eval_cf_wp(conf, cf))) return e; return 0; }
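/*
 * Self-contained sketch of the multipole sum in eval_cf_mp() for ell = 2:
 *   xi_2(s) ~= (2*ell + 1) * (1/nmu) * sum_j xi(s, mu_j) * P_ell(mu_j),
 * with mu_j = (j + 0.5) / nmu.  p2() stands in for the library's legpoly();
 * xi is stored with s varying fastest, matching cf->cf[i][k + j*ns] above.
 */
static double p2(double mu) { return 0.5 * (3 * mu * mu - 1); }

void quadrupole(const double *xi, int ns, int nmu, double *xi2) {
  for (int k = 0; k < ns; k++) xi2[k] = 0;
  const double fac = (2 * 2 + 1) / (double) nmu;     /* (2*ell + 1) / nmu */
  for (int j = 0; j < nmu; j++) {
    double mu = (j + 0.5) / (double) nmu;            /* bin midpoint */
    for (int k = 0; k < ns; k++)
      xi2[k] += xi[k + j * ns] * fac * p2(mu);
  }
}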
exch.c
#include "micro_clib.h" void compute_exch_field_micro(double *restrict m, double *restrict field, double *restrict energy, double *restrict Ms_inv, double A, double dx, double dy, double dz, int n, int *restrict ngbs) { /* Compute the micromagnetic exchange field and energy using the * matrix of neighbouring spins and a second order approximation * for the derivative * * Ms_inv :: Array with the (1 / Ms) values for every mesh node. * The values are zero for points with Ms = 0 (no material) * * A :: Exchange constant * * dx, dy, dz :: Mesh spacings in the corresponding directions * * n :: Number of mesh nodes * * ngbs :: The array of neighbouring spins, which has (6 * n) * entries. Specifically, it contains the indexes of * the neighbours of every mesh node, in the following order: * -x, +x, -y, +y, -z, +z * * Thus, the array is like: * | 0-x, 0+x, 0-y, 0+y, 0-z, 0+z, 1-x, 1+x, 1-y, ... | * i=0 i=1 ... * * where 0-y is the index of the neighbour of the 0th spin, * in the -y direction, for example. The index value for a * neighbour where Ms = 0, is evaluated as -1. The array * automatically gives periodic boundaries. * * A basic example is a 3 x 3 two dimensional mesh with PBCs * in the X and Y direction: * * +-----------+ * | 6 | 7 | 8 | * +-----------+ * | 3 | 4 | 5 | * +-----------+ * | 0 | 1 | 2 | * +-----------+ * * so, the first 6 entries (neighbours of the 0th mesh node) * of the array would be: [ 2 1 6 3 -1 -1 ... ] * (-1 since there is no material in +-z, and a '2' first, * since it is the left neighbour which is the PBC in x, etc..) * * For the exchange computation, the field is defined as: * H_ex = (2 * A / (mu0 * Ms)) * nabla^2 (mx, my, mz) * * Therefore, for the i-th mesh node (spin), we approximate the * derivatives as: * nabla^2 mx = (1 / dx^2) * ( m[i-x] - 2 * m[i] + m[i+x] ) + * (1 / dy^2) * ( m[i-y] - 2 * m[i] + m[i+y] ) + * (1 / dz^2) * ( m[i-z] - 2 * m[i] + m[i+z] ) * * Where i-x is the neighbour in the -x direction. This is similar * for my and mz. * We can notice that the sum is the same if we do: * ( m[i-x] - m[i] ) + ( m[i+x] - m[i] ) * so we can iterate through the neighbours and perform the sum with the * corresponding coefficient 1 /dx, 1/dy or 1/dz * * The *m array contains the spins as: * [mx0, my0, mz0, mx1, my1, mz1, mx2, ...] 
* so if we want the starting position of the magnetisation for the * i-th spin, we only have to do (3 * i) for mx, (3 * i + 1) for my * and (3 * i + 2) for mz * * * IMPORTANT: The ex field usually has the structure: * 2 * A / (mu0 Ms ) * (Second derivative of M) * When discretising the derivative, it carries a "2" in the * denominator which we "cancel" with the "2" in the prefactor, * hence we do not put it explicitly in the calculations * * So, when computing the energy: (-1/2) * mu * Ms * H_ex * we only put the 0.5 factor and don't worry about the "2"s in the * field * */ /* Define the coefficients */ double ax = 2 * A / (dx * dx); double ay = 2 * A / (dy * dy); double az = 2 * A / (dz * dz); /* Here we iterate through every mesh node */ #pragma omp parallel for for (int i = 0; i < n; i++) { double fx = 0, fy = 0, fz = 0; int idnm = 0; // Index for the magnetisation matrix int idn = 6 * i; // index for the neighbours /* Set a zero field for sites without magnetic material */ if (Ms_inv[i] == 0.0){ field[3 * i] = 0; field[3 * i + 1] = 0; field[3 * i + 2] = 0; continue; } /* Here we iterate through the neighbours */ for (int j = 0; j < 6; j++) { /* Remember that index=-1 is for sites without material */ if (ngbs[idn + j] >= 0) { /* Magnetisation of the neighbouring spin since ngbs gives * the neighbour's index */ idnm = 3 * ngbs[idn + j]; /* Check that the magnetisation of the neighbouring spin * is larger than zero */ if (Ms_inv[ngbs[idn + j]] > 0){ /* Neighbours in the -x and +x directions * giving: ( m[i-x] - m[i] ) + ( m[i+x] - m[i] ) * when ngbs[idn + j] > 0 for j = 0 and j=1 * If, for example, there is no * neighbour at -x (j=0) in the 0th node (no PBCs), * the second derivative would only be avaluated as: * (1 / dx * dx) * ( m[i+x] - m[i] ) * which, according to * [M.J. Donahue and D.G. Porter; Physica B, 343, 177-183 (2004)] * when performing the integration of the energy, we still * have error of the order O(dx^2) * This same applies for the other directions */ if (j == 0 || j == 1) { fx += ax * (m[idnm] - m[3 * i]); fy += ax * (m[idnm + 1] - m[3 * i + 1]); fz += ax * (m[idnm + 2] - m[3 * i + 2]); } /* Neighbours in the -y and +y directions */ else if (j == 2 || j == 3) { fx += ay * (m[idnm] - m[3 * i]); fy += ay * (m[idnm + 1] - m[3 * i + 1]); fz += ay * (m[idnm + 2] - m[3 * i + 2]); } /* Neighbours in the -z and +z directions */ else if (j == 4 || j == 5) { fx += az * (m[idnm] - m[3 * i]); fy += az * (m[idnm + 1] - m[3 * i + 1]); fz += az * (m[idnm + 2] - m[3 * i + 2]); } else { continue; } } } } /* Energy as: (-mu0 * Ms / 2) * [ H_ex * m ] */ energy[i] = -0.5 * (fx * m[3 * i] + fy * m[3 * i + 1] + fz * m[3 * i + 2]); /* Update the field H_ex which has the same structure than *m */ field[3 * i] = fx * Ms_inv[i] * MU0_INV; field[3 * i + 1] = fy * Ms_inv[i] * MU0_INV; field[3 * i + 2] = fz * Ms_inv[i] * MU0_INV; } } inline int get_index(int nx, int ny, int i, int j, int k){ return k * nx*ny + j * nx + i; } void compute_exch_field_rkky_micro(double *m, double *field, double *energy, double *Ms_inv, double sigma, int nx, double ny, double nz, int z_bottom, int z_top){ /* Compute the micromagnetic exchange field and energy using the * matrix of neighbouring spins and a second order approximation * for the derivative * * Ms_inv :: Array with the (1 / Ms) values for every mesh node. * The values are zero for points with Ms = 0 (no material) * * sigma :: Exchange constant * * nx, ny, nz :: Mesh dimensions. 
* The exchange field at the top (bottom) layer can be computed as: * * H_top = (sigma / (mu0 * Ms)) * m_bottom * H_bottom = (sigma / (mu0 * Ms)) * m_top * * The *m array contains the spins as: * [mx0, my0, mz0, mx1, my1, mz1, mx2, ...] * so if we want the starting position of the magnetisation for the * i-th spin, we only have to do (3 * i) for mx, (3 * i + 1) for my * and (3 * i + 2) for mz * * * */ int n = nx*ny*nz; for (int i = 0; i < n; i++){ energy[i] = 0; field[3*i]=0; field[3*i+1]=0; field[3*i+2]=0; } #pragma omp parallel for for (int i = 0; i < nx; i++) { for (int j = 0; j < ny; j++){ double mtx=0, mty=0, mtz=0; double mbx=0, mby=0, mbz=0; int id1 = get_index(nx,ny, i, j, z_bottom); int id2 = get_index(nx,ny, i, j, z_top); mtx = m[3*id2]; mty = m[3*id2+1]; mtz = m[3*id2+2]; mbx = m[3*id1]; mby = m[3*id1+1]; mbz = m[3*id1+2]; if (Ms_inv[id1] != 0.0){ energy[id1] = sigma*(1-mtx*mbx-mty*mby-mtz*mbz); field[3*id1] = sigma * mtx * Ms_inv[id1] * MU0_INV; field[3*id1+1] = sigma * mty * Ms_inv[id1] * MU0_INV; field[3*id1+2] = sigma * mtz * Ms_inv[id1] * MU0_INV; } if (Ms_inv[id2] != 0.0){ energy[id2] = sigma*(1-mtx*mbx-mty*mby-mtz*mbz); field[3*id2] = sigma * mbx * Ms_inv[id2] * MU0_INV; field[3*id2+1] = sigma * mby * Ms_inv[id2] * MU0_INV; field[3*id2+2] = sigma * mbz * Ms_inv[id2] * MU0_INV; } } } }
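The neighbour bookkeeping described in the comment above is easy to get wrong, so here is a minimal, hypothetical driver (not part of micro_clib) that builds the ngbs array for the 3 x 3 mesh with periodic boundaries in x and y used as the example in the docstring and evaluates the exchange field once. It assumes micro_clib.h declares compute_exch_field_micro() and MU0_INV; all material parameters are illustrative.

/* Hypothetical stand-alone driver (not part of micro_clib). */
#include <stdio.h>
#include "micro_clib.h"

enum { NX = 3, NY = 3, NN = NX * NY };

int main(void)
{
    double m[3 * NN], field[3 * NN], energy[NN], Ms_inv[NN];
    int ngbs[6 * NN];

    for (int i = 0; i < NN; i++) {
        int ix = i % NX, iy = i / NX;

        /* neighbour order: -x, +x, -y, +y, -z, +z (PBC in x and y only),
         * reproducing the [ 2 1 6 3 -1 -1 ... ] layout from the docstring */
        ngbs[6 * i + 0] = iy * NX + (ix + NX - 1) % NX;
        ngbs[6 * i + 1] = iy * NX + (ix + 1) % NX;
        ngbs[6 * i + 2] = ((iy + NY - 1) % NY) * NX + ix;
        ngbs[6 * i + 3] = ((iy + 1) % NY) * NX + ix;
        ngbs[6 * i + 4] = -1;   /* no material in -z */
        ngbs[6 * i + 5] = -1;   /* no material in +z */

        Ms_inv[i] = 1.0 / 8.6e5;                       /* illustrative Ms */
        m[3 * i] = 0.0; m[3 * i + 1] = 0.0; m[3 * i + 2] = 1.0;
    }

    compute_exch_field_micro(m, field, energy, Ms_inv,
                             1.3e-11,                  /* A in J/m        */
                             5e-9, 5e-9, 5e-9,         /* dx, dy, dz in m */
                             NN, ngbs);

    printf("H_ex at node 0: (%g, %g, %g)\n", field[0], field[1], field[2]);
    return 0;
}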
GB_unaryop__lnot_bool_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_bool_uint64 // op(A') function: GB_tran__lnot_bool_uint64 // C type: bool // A type: uint64_t // cast: bool cij = (bool) aij // unaryop: cij = !aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_bool_uint64 ( bool *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_bool_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
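For readers tracing the macro layering, the apply kernel above boils down to the following plain loop once GB_GETA, GB_CASTING and GB_OP are expanded. This is an illustrative sketch only, not part of GraphBLAS; it omits the GB_DISABLE path and the transpose variant.

/* Sketch of what GB_unop__lnot_bool_uint64 computes after macro expansion:
 * cast uint64_t -> bool, then apply logical NOT. */
#include <stdbool.h>
#include <stdint.h>

static void lnot_bool_uint64(bool *Cx, const uint64_t *Ax, int64_t anz,
                             int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;      /* GB_GETA           */
        bool x = (bool) aij ;        /* GB_CASTING        */
        Cx [p] = !x ;                /* GB_OP: cij = !aij */
    }
}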
GB_binop__bxnor_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxnor_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__bxnor_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__bxnor_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__bxnor_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_uint16) // A*D function (colscale): GB (_AxD__bxnor_uint16) // D*A function (rowscale): GB (_DxB__bxnor_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__bxnor_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__bxnor_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_uint16) // C=scalar+B GB (_bind1st__bxnor_uint16) // C=scalar+B' GB (_bind1st_tran__bxnor_uint16) // C=A+scalar GB (_bind2nd__bxnor_uint16) // C=A'+scalar GB (_bind2nd_tran__bxnor_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = ~((aij) ^ (bij)) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ~((x) ^ (y)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXNOR || GxB_NO_UINT16 || GxB_NO_BXNOR_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bxnor_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bxnor_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bxnor_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__bxnor_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__bxnor_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bxnor_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bxnor_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bxnor_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bxnor_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bxnor_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bxnor_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = ~((x) ^ (bij)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bxnor_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = ~((aij) ^ (y)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ~((x) ^ (aij)) ; \ } GrB_Info GB (_bind1st_tran__bxnor_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t 
*restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ~((aij) ^ (y)) ; \ } GrB_Info GB (_bind2nd_tran__bxnor_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
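As a quick sanity check on the operator itself, the following stand-alone program (not part of GraphBLAS) applies cij = ~(aij ^ bij) the same way the _bind2nd kernel above does, with the scalar bound to the second argument and no bitmap mask; the input values are made up.

/* Stand-alone sketch of the bind2nd kernel: Cx [p] = ~(Ax [p] ^ y). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t Ax[4] = { 0x0000, 0xFFFF, 0x00FF, 0x1234 };
    uint16_t Cx[4];
    uint16_t y = 0x0F0F;

    for (int p = 0 ; p < 4 ; p++)
    {
        Cx [p] = (uint16_t) ~(Ax [p] ^ y) ;   /* BXNOR */
    }

    /* BXNOR sets exactly the bits where the two inputs agree */
    for (int p = 0 ; p < 4 ; p++)
    {
        printf ("~(0x%04X ^ 0x%04X) = 0x%04X\n", Ax [p], y, Cx [p]) ;
    }
    return 0 ;
}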
ImageIOBase.h
/** \file ImageIOBase.h \brief Define the base IO funcatinality for image_analyze_io toolbox \author Hui Xue */ #pragma once #include <iostream> #include <typeinfo> #include "ImageIOExport.h" #include "NDArray.h" #include "complext.h" #include "GadgetronException.h" #include "hoNDArray.h" #include "hoNDImage.h" #include "hoNDArray_fileio.h" namespace Gadgetron { struct rgb_type { unsigned char r,g,b; }; struct rgba_type { unsigned char r,g,b,a; }; class EXPORTIMAGEIO ImageIOWorker { public: ImageIOWorker(const std::string& ioTag, bool readFlag=true); virtual ~ImageIOWorker(); // open the file stream // readFlag: true, read mode; false, write mode virtual bool open(); // close the file stream virtual bool close(); // the current file offset long tell(); // set the file offset bool seek(long long offset); // reset the file to the beginning bool reset() { return (this->seek(0)); } // check the status of i/o operations bool IOinError(); // read/write // len: number of bytes in data bool read(char* data, long long len); bool write(const char* data, long long len); protected: std::string ioTag_; std::fstream fid_; bool readFlag_; }; #ifdef DT_UNKNOWN #undef DT_UNKNOWN #endif // DT_UNKNOWN enum ImageIODataType { DT_ANA_UNKNOWN =0, DT_NONE =0, DT_UNKNOWN =0, /* what it says, dude */ DT_BINARY =1, /* binary (1 bit/voxel) */ DT_UNSIGNED_CHAR =2, /* unsigned char (8 bits/voxel) */ DT_SIGNED_SHORT =4, /* signed short (16 bits/voxel) */ DT_UNSIGNED_SHORT =5, DT_SIGNED_INT =8, /* signed int (32 bits/voxel) */ DT_UNSIGNED_INT =9, DT_FLOAT =16, /* float (32 bits/voxel) */ DT_COMPLEX =32, /* complex (64 bits/voxel) */ DT_DOUBLE =64, /* double (64 bits/voxel) */ DT_RGB =128, /* RGB triple (24 bits/voxel) */ DT_ALL =255, /* not very useful (?) */ /*----- another set of names for the same ---*/ DT_UINT8 =2, DT_INT16 =4, DT_INT32 =8, DT_FLOAT32 =16, DT_COMPLEX64 =32, DT_FLOAT64 =64, DT_RGB24 =128, /*------------------- new codes for NIFTI ---*/ DT_INT8 =256, /* signed char (8 bits) */ DT_UINT16 =512, /* unsigned short (16 bits) */ DT_UINT32 =768, /* unsigned int (32 bits) */ DT_INT64 =1024, /* long long (64 bits) */ DT_UINT64 =1280, /* unsigned long long (64 bits) */ DT_FLOAT128 =1536, /* long double (128 bits) */ DT_COMPLEX128 =1792, /* double pair (128 bits) */ DT_COMPLEX256 =2048, /* long double pair (256 bits) */ DT_RGBA32 =2304, /* 4 byte RGBA (32 bits/voxel) */ }; template <typename HeaderType> class ImageIOBase { public: typedef HeaderType THeaderType; ImageIOBase() { pixelSize_.resize(10, 1.0); } ImageIOBase(float px, float py) { pixelSize_.resize(2); pixelSize_[0] = px; pixelSize_[1] = py; } ImageIOBase(float px, float py, float pz) { pixelSize_.resize(3); pixelSize_[0] = px; pixelSize_[1] = py; pixelSize_[2] = pz; } ImageIOBase(float px, float py, float pz, float pt) { pixelSize_.resize(4); pixelSize_[0] = px; pixelSize_[1] = py; pixelSize_[2] = pz; pixelSize_[3] = pt; } ImageIOBase(float px, float py, float pz, float pt, float pr) { pixelSize_.resize(5); pixelSize_[0] = px; pixelSize_[1] = py; pixelSize_[2] = pz; pixelSize_[3] = pt; pixelSize_[4] = pr; } ImageIOBase(float px, float py, float pz, float pt, float pr, float ps) { pixelSize_.resize(6); pixelSize_[0] = px; pixelSize_[1] = py; pixelSize_[2] = pz; pixelSize_[3] = pt; pixelSize_[4] = pr; pixelSize_[5] = ps; } ImageIOBase(float px, float py, float pz, float pt, float pr, float ps, float pp) { pixelSize_.resize(7); pixelSize_[0] = px; pixelSize_[1] = py; pixelSize_[2] = pz; pixelSize_[3] = pt; pixelSize_[4] = pr; pixelSize_[5] = ps; 
pixelSize_[6] = pp; } ImageIOBase(float px, float py, float pz, float pt, float pr, float ps, float pp, float pq) { pixelSize_.resize(8); pixelSize_[0] = px; pixelSize_[1] = py; pixelSize_[2] = pz; pixelSize_[3] = pt; pixelSize_[4] = pr; pixelSize_[5] = ps; pixelSize_[6] = pp; pixelSize_[7] = pq; } void set_pixel_size(float px, float py, float pz=1.0f, float pt=1.0f, float pr=1.0f, float ps=1.0f, float pp=1.0f, float pq=1.0f) { pixelSize_.resize(8); pixelSize_[0] = px; pixelSize_[1] = py; pixelSize_[2] = pz; pixelSize_[3] = pt; pixelSize_[4] = pr; pixelSize_[5] = ps; pixelSize_[6] = pp; pixelSize_[7] = pq; } void set_pixel_size(double px, double py, double pz=1.0, double pt=1.0, double pr=1.0, double ps=1.0, double pp=1.0, double pq=1.0) { pixelSize_.resize(8); pixelSize_[0] = (float)px; pixelSize_[1] = (float)py; pixelSize_[2] = (float)pz; pixelSize_[3] = (float)pt; pixelSize_[4] = (float)pr; pixelSize_[5] = (float)ps; pixelSize_[6] = (float)pp; pixelSize_[7] = (float)pq; } virtual ~ImageIOBase() { } /// export/input for 2D/3D/4D array /// filename should be given without extension virtual void export_array(const hoNDArray<short>& a, const std::string& filename) = 0; virtual void export_array(const hoNDArray<unsigned short>& a, const std::string& filename) = 0; virtual void export_array(const hoNDArray<int>& a, const std::string& filename) = 0; virtual void export_array(const hoNDArray<unsigned int>& a, const std::string& filename) = 0; virtual void export_array(const hoNDArray<float>& a, const std::string& filename) = 0; virtual void export_array(const hoNDArray<double>& a, const std::string& filename) = 0; virtual void export_array(const hoNDArray< std::complex<float> >& a, const std::string& filename) = 0; virtual void export_array(const hoNDArray< std::complex<double> >& a, const std::string& filename) = 0; virtual void import_array(hoNDArray<short>& a, const std::string& filename) = 0; virtual void import_array(hoNDArray<unsigned short>& a, const std::string& filename) = 0; virtual void import_array(hoNDArray<int>& a, const std::string& filename) = 0; virtual void import_array(hoNDArray<unsigned int>& a, const std::string& filename) = 0; virtual void import_array(hoNDArray<float>& a, const std::string& filename) = 0; virtual void import_array(hoNDArray<double>& a, const std::string& filename) = 0; virtual void import_array(hoNDArray< std::complex<float> >& a, const std::string& filename) = 0; virtual void import_array(hoNDArray< std::complex<double> >& a, const std::string& filename) = 0; template <typename T> void export_array_complex_real_imag(const hoNDArray<T>& a, const std::string& filename) { try { typedef typename Gadgetron::realType<T>::Type value_type; hoNDArray<value_type> buf(a.dimensions()); long long num = (long long)a.get_number_of_elements(); long long n; #pragma omp parallel for default(none) private(n) shared(num, a, buf) for ( n=0; n<num; n++ ) { buf(n) = a(n).real(); } std::string filenameReal = filename; filenameReal.append("_REAL"); export_array(buf, filenameReal); #pragma omp parallel for default(none) private(n) shared(num, a, buf) for ( n=0; n<num; n++ ) { buf(n) = a(n).imag(); } std::string filenameImag = filename; filenameImag.append("_IMAG"); export_array(buf, filenameImag); } catch(...) { GADGET_THROW("Errors in export_array_complex_real_imag(const hoNDArray<T>& a, const std::string& filename) ... 
"); } } template <typename T> void export_array_complex(const hoNDArray<T>& a, const std::string& filename) { try { typedef typename Gadgetron::realType<T>::Type value_type; hoNDArray<value_type> rpart, ipart, mag, phs; rpart.create(a.dimensions()); ipart.create(a.dimensions()); mag.create(a.dimensions()); phs.create(a.dimensions()); long long num = (long long)a.get_number_of_elements(); long long n; #pragma omp parallel for default(none) private(n) shared(num, a, rpart, ipart, mag, phs) for ( n=0; n<num; n++ ) { rpart(n) = a(n).real(); ipart(n) = a(n).imag(); mag(n) = std::abs( a(n) ); phs(n) = std::arg( a(n) ); } std::string filenameReal = filename; filenameReal.append("_REAL"); GADGET_CATCH_THROW(export_array(rpart, filenameReal)); std::string filenameImag = filename; filenameImag.append("_IMAG"); GADGET_CATCH_THROW(export_array(ipart, filenameImag)); std::string filenameMag = filename; filenameMag.append("_MAG"); GADGET_CATCH_THROW(export_array(mag, filenameMag)); std::string filenamePhase = filename; filenamePhase.append("_PHASE"); GADGET_CATCH_THROW(export_array(phs, filenamePhase)); } catch(...) { GADGET_THROW("Errors in export_array_complex(const hoNDArray<T>& a, const std::string& filename) ... "); } } template <typename T> void import_array_complex(hoNDArray<T>& a, const std::string& filename) { try { typedef typename T::value_type value_type; hoNDArray<value_type> real, imag; std::string filenameReal = filename; filenameReal.append("_REAL"); GADGET_CATCH_THROW(import_array(real, filenameReal)); std::string filenameImag = filename; filenameImag.append("_IMAG"); GADGET_CATCH_THROW(import_array(imag, filenameImag)); a.create(real.dimensions()); long long num = (long long)real.get_number_of_elements(); long long n; #pragma omp parallel for private(n) shared(num, a, real, imag) for ( n=0; n<num; n++ ) { a(n) = T(real(n), imag(n)); } } catch(...) { GADGET_THROW("Errors in import_array_complex(const hoNDArray<T>& a, const std::string& filename) ... "); } } template <typename T> void import_array_complex(hoNDArray<T>& a, const std::string& filename_real, const std::string& filename_imag) { try { typedef typename realType<T>::Type value_type; hoNDArray<value_type> real, imag; GADGET_CATCH_THROW(import_array(real, filename_real)); GADGET_CATCH_THROW(import_array(imag, filename_imag)); a.create(real.dimensions()); long long num = (long long)real.get_number_of_elements(); long long n; #pragma omp parallel for private(n) shared(num, a, real, imag) for ( n=0; n<num; n++ ) { a(n) = T(real(n), imag(n)); } } catch(...) { GADGET_THROW("Errors in import_array_complex(hoNDArray<T>& a, const std::string& filename_real, const std::string& filename_imag) ... 
"); } } template <typename T> void export_2d_array(const hoNDArray<T>& a, const std::string& filename) { export_array(a, filename); } template <typename T> void import_2d_array(hoNDArray<T>& a, const std::string& filename) { import_array(a, filename); } template <typename T> void export_2d_array_complex(const hoNDArray<T>& a, const std::string& filename) { export_array_complex(a, filename); } template <typename T> void import_2d_array_complex(hoNDArray<T>& a, const std::string& filename) { import_array_complex(a, filename); } template <typename T> void export_3d_array(const hoNDArray<T>& a, const std::string& filename) { export_array(a, filename); } template <typename T> void import_3d_array(hoNDArray<T>& a, const std::string& filename) { import_array(a, filename); } template <typename T> void export_3d_array_complex(const hoNDArray<T>& a, const std::string& filename) { export_array_complex(a, filename); } template <typename T> void import_3d_array_complex(hoNDArray<T>& a, const std::string& filename) { import_array_complex(a, filename); } template <typename T> void export_4d_array(const hoNDArray<T>& a, const std::string& filename) { try { size_t RO = a.get_size(0); size_t E1 = a.get_size(1); size_t CHA = a.get_size(2); size_t N = a.get_size(3); size_t ii; for (ii=0; ii<N; ii++ ) { std::vector<size_t> dim(3); dim[0] = RO; dim[1] = E1; dim[2] = CHA; boost::shared_ptr< std::vector<size_t> > sDim(&dim); hoNDArray<T> a3D(sDim, const_cast<T*>(a.begin()+ii*RO*E1*CHA), false); std::ostringstream ostr; ostr << filename << "_" << ii << std::ends; GADGET_CATCH_THROW(export3DArray(a3D, ostr.str())); } } catch(...) { GADGET_THROW("Errors in export_4d_array(const hoNDArray<T>& a, const std::string& filename) ... "); } } template <typename T> void export_4d_array_complex(const hoNDArray<T>& a, const std::string& filename) { try { size_t RO = a.get_size(0); size_t E1 = a.get_size(1); size_t CHA = a.get_size(2); size_t N = a.get_size(3); size_t ii; for (ii=0; ii<N; ii++ ) { std::vector<size_t> dim(3); dim[0] = RO; dim[1] = E1; dim[2] = CHA; boost::shared_ptr< std::vector<size_t> > sDim(&dim); hoNDArray<T> a3D(sDim, const_cast<T*>(a.begin()+ii*RO*E1*CHA), false); std::ostringstream ostr; ostr << filename << "_" << ii << std::ends; GADGET_CATCH_THROW(export3DArrayComplex(a3D, ostr.str())); } } catch(...) { GADGET_THROW("Errors in export_4d_array_complex(const hoNDArray<T>& a, const std::string& filename) ... "); } } static void readFromFile(const std::string& filename, char*& data, long long& length) { try { if (data!=NULL) delete [] data; ImageIOWorker ioworker_(filename, true); GADGET_CHECK_THROW(ioworker_.open()); // read the total length long long totalLen; GADGET_CHECK_THROW(ioworker_.read(reinterpret_cast<char*>(&totalLen), sizeof(long long))); length = totalLen - sizeof(long long); data = new char[length]; GADGET_CHECK_THROW(data!=NULL); GADGET_CHECK_THROW(ioworker_.read(data, length)); GADGET_CHECK_THROW(ioworker_.close()); } catch (...) { GADGET_THROW("Errors in ImageIOBase::readFromFile(const std::string& filename, char*& data, long long& length) ... 
"); } } static void writeToFile(const std::string& filename, char* data, long long length) { try { if ( length == 0 ) return; GADGET_CHECK_THROW(data!=NULL); ImageIOWorker ioworker_(filename, false); GADGET_CHECK_THROW(ioworker_.open()); // write the total lengh const long long totalLen = length+sizeof(long long); GADGET_CHECK_THROW(ioworker_.write(reinterpret_cast<const char*>(&totalLen), sizeof(long long))); // write the data GADGET_CHECK_THROW(ioworker_.write(data, length)); // close the file GADGET_CHECK_THROW(ioworker_.close()); } catch (...) { GADGET_THROW("Errors in ImageIOBase::writeToFile(const std::string& filename, char* data, long long length) ... "); } } protected: std::vector<float> pixelSize_; // get the run-time type ID from analyze data type or vice versa std::string getRTTIFromDataType(ImageIODataType aDT) { std::string rttiID; switch (aDT) { case DT_INT8 : rttiID = typeid(char).name(); break; case DT_UNSIGNED_CHAR : rttiID = typeid(unsigned char).name(); break; case DT_SIGNED_SHORT : rttiID = typeid(short).name(); break; case DT_UNSIGNED_SHORT : case DT_UINT16 : rttiID = typeid(unsigned short).name(); break; case DT_SIGNED_INT : rttiID = typeid(int).name(); break; case DT_UINT32 : rttiID = typeid(unsigned int).name(); break; case DT_INT64 : rttiID = typeid(long long).name(); break; case DT_UINT64 : rttiID = typeid(unsigned long long).name(); break; case DT_FLOAT : rttiID = typeid(float).name(); break; case DT_DOUBLE : rttiID = typeid(double).name(); break; case DT_FLOAT128 : rttiID = typeid(long double).name(); break; case DT_COMPLEX : rttiID = typeid( std::complex<float> ).name(); break; case DT_COMPLEX128 : rttiID = typeid( std::complex<double> ).name(); break; case DT_COMPLEX256 : rttiID = typeid( std::complex<long double> ).name(); break; case DT_RGB : rttiID = typeid( Gadgetron::rgb_type ).name(); break; case DT_RGBA32 : rttiID = typeid( Gadgetron::rgba_type ).name(); break; default: rttiID = "UNKOWN TYPE"; } return rttiID; } ImageIODataType getDataTypeFromRTTI(const std::string& name) { ImageIODataType analyzeDT = DT_ANA_UNKNOWN; if ( name == typeid(unsigned char).name() ) { analyzeDT = DT_UNSIGNED_CHAR; } if ( name == typeid(short).name() ) { analyzeDT = DT_SIGNED_SHORT; } if ( name == typeid(unsigned short).name() ) { analyzeDT = DT_UINT16; } if ( name == typeid(int).name() ) { analyzeDT = DT_SIGNED_INT; } if ( name == typeid(unsigned int).name() ) { analyzeDT = DT_UINT32; } if ( name == typeid(float).name() ) { analyzeDT = DT_FLOAT; } if ( name == typeid(double).name() ) { analyzeDT = DT_DOUBLE; } if ( name == typeid(long double).name() ) { analyzeDT = DT_FLOAT128; } if ( name == typeid( std::complex<float> ).name() ) { analyzeDT = DT_COMPLEX; } if ( name == typeid( std::complex<double> ).name() ) { analyzeDT = DT_COMPLEX128; } if ( name == typeid(std::complex<long double>).name() ) { analyzeDT = DT_COMPLEX256; } if ( name == typeid(Gadgetron::rgb_type).name() ) { analyzeDT = DT_RGB; } if ( name == typeid(Gadgetron::rgba_type).name() ) { analyzeDT = DT_RGBA32; } return analyzeDT; } template <typename T> void read_data(const std::string& filename, T* data, long long len) { try { ImageIOWorker ioworker(filename, true); GADGET_CHECK_THROW(ioworker.open()); GADGET_CHECK_THROW(ioworker.read(reinterpret_cast<char*>(data), len)); GADGET_CHECK_THROW(ioworker.close()); } catch(...) { GADGET_THROW("Errors in read_data(const std::string& filename, T* data, long long len) ... 
"); } } template <typename T> void write_data(const std::string& filename, const T* data, long long len) { try { ImageIOWorker ioworker(filename, false); GADGET_CHECK_THROW(ioworker.open()); GADGET_CHECK_THROW(ioworker.write(reinterpret_cast<const char*>(data), len)); GADGET_CHECK_THROW(ioworker.close()); } catch(...) { GADGET_THROW("Errors in write_data(const std::string& filename, const T* data, long long len) ... "); } } }; }
aux_cnn.h
#include <dirent.h> #include <sys/types.h> #include <cvpp/containers/matrix.h> #include <cvpp/containers/vector.h> #include <cvpp/containers/image.h> #include <cvpp/properties/pose.h> #include <sys/types.h> #include <sys/stat.h> #include <unistd.h> using namespace cvpp; Seq<String> get_files( const String& dir , const int& n = 0 ) { DIR *dp; struct dirent *dirp; Seq<String> files; if( ( dp = opendir( dir.c_str() ) ) == NULL ) disp( "Error Opening" , dir ); while( ( dirp = readdir( dp ) ) != NULL) { String file( dirp->d_name ); if( file[ file.size() - 4 ] == '.' ) files.push_back( dir + file ); } closedir( dp ); std::sort( files.begin() , files.end() ); if( n > 0 ) files.resize( n ); return files; } Matf load_vel2cam( const String& file ) { String line; std::ifstream infile( file + "/calib_velo_to_cam.txt" ); float R[9] , t[3]; while( std::getline( infile , line ) ) { if( line[0] == 'R' && line[1] == ':' ) tokenFloat( line.c_str() , R , ' ' ); if( line[0] == 'T' && line[1] == ':' ) tokenFloat( line.c_str() , t , ' ' ); } Matf T( 4 , 4 ); T.eig() << R[0] , R[1] , R[2] , t[0] , R[3] , R[4] , R[5] , t[1] , R[6] , R[7] , R[8] , t[2] , 0.0 , 0.0 , 0.0 , 1.0 ; return T.t(); } Matf load_imu2vel( const String& file ) { String line; std::ifstream infile( file + "/calib_imu_to_velo.txt" ); float R[9] , t[3]; while( std::getline( infile , line ) ) { if( line[0] == 'R' && line[1] == ':' ) tokenFloat( line.c_str() , R , ' ' ); if( line[0] == 'T' && line[1] == ':' ) tokenFloat( line.c_str() , t , ' ' ); } Matf T( 4 , 4 ); T.eig() << R[0] , R[1] , R[2] , t[0] , R[3] , R[4] , R[5] , t[1] , R[6] , R[7] , R[8] , t[2] , 0.0 , 0.0 , 0.0 , 1.0 ; return T.t(); } void load_cam2cam( const String& dir , Matf& K , Matf& D , Matf& R , Matf& P ) { String file = dir + "/calib_cam_to_cam.txt"; String line; std::ifstream infile( file ); float k[9] , d[5] , r[9] , p[12]; while( std::getline( infile , line ) ) { if( line.substr(0,4).compare( "K_02" ) == 0 ) tokenFloat( line.substr(5).c_str() , k , ' ' ); if( line.substr(0,4).compare( "D_02" ) == 0 ) tokenFloat( line.substr(5).c_str() , d , ' ' ); if( line.substr(0,9).compare( "R_rect_00" ) == 0 ) tokenFloat( line.substr(10).c_str() , r , ' ' ); if( line.substr(0,9).compare( "P_rect_02" ) == 0 ) tokenFloat( line.substr(10).c_str() , p , ' ' ); } K.reset( 3 , 3 ); forLOOPij( K.r() , K.c() ) K(i,j) = k[ i * K.c() + j ]; D.reset( 5 ); forLOOPi( D.r() ) D(i) = d[ i ]; R.reset( 4 , 4 ).setIdentity(); forLOOPij( 3 , 3 ) R(i,j) = r[ i * 3 + j ]; R.blu(3) = R.blu(3).t(); P.reset( 3 , 4 ); forLOOPij( P.r() , P.c() ) P(i,j) = p[ i * P.c() + j ]; P = P.t(); } SeqMatd load_vel( const Seq<String>& files ) { int n = files.size(); SeqMatd vels( n ); int base = 1000000; float *data = (float*)malloc( base * sizeof(float) ); forLOOPi( n ) { float *px = data + 0 , *py = data + 1; float *pz = data + 2 , *pr = data + 3; FILE *stream; stream = fopen( files[i].c_str() , "rb" ); int num = fread( data , sizeof(float) , base , stream ) / 4; vels[i].reset( num , 4 ); forLOOPj( num ) { vels[i].row(j) << *px , *py , *pz , *pr ; px += 4 ; py += 4 ; pz += 4 ; pr += 4 ; } fclose( stream ); } return vels; } SeqImg3c load_img( const Seq<String>& files ) { int n = files.size(); SeqImg3c imgs( n ); #pragma omp parallel for forLOOPi( n ) { imgs[i].load( files[i] ); } return imgs; } SeqPosef load_pos( const Seq<String>& files ) { int n = files.size(); Matf data( n , 30 ); forLOOPi( n ) { float vals[30]; std::ifstream infile( files[i] ); String line; while( std::getline( infile , line ) ) tokenFloat( ( ' ' + 
line ).c_str() , vals , ' ' ); forLOOPj( data.c() ) data(i,j) = vals[j]; } float lat0 = data(0,0); float r = 6378137 , s = std::cos( lat0 * PI / 180.0 ); float sr = s * r; Matf xyz( n , 6 ); forLOOPi( xyz.r() ) { float lat = data(i,0) , lon = data(i,1); float z = data(i,2) , r = data(i,3) , p = data(i,4) , w = data(i,5); float x = sr * PI * lon / 180.0; float y = sr * std::log( std::tan( PI * ( 90.0 + lat ) / 360.0 ) ); xyz.row(i) << x , y , z , r , p , w; } Matf off = xyz.cl(3).r(0).clone(); xyz.cl(3) -= off; SeqPosef poses( n ); forLOOPi( poses.size() ) poses[i].setPose( xyz.r(i) ); return poses; } void filter_img( Matf& X , const int& r , const int& c ) { Veci idx; forLOOPi( X.r() ) { if( X(i,2) > 0.0 ) if( X(i,0) > 0.0 && X(i,0) < c * X(i,2) && X(i,1) > 0.0 && X(i,1) < r * X(i,2) ) idx.insert( i ); }; idx.update(); X.SampleRows( idx ); } Matf color_pts( const Matf& uv , const Img3c& img ) { Matf clr( uv.r() , 3 ); forLOOPi( clr.r() ) { clr.row(i) << img( uv(i,1) , uv(i,0) , 2 ) , img( uv(i,1) , uv(i,0) , 1 ) , img( uv(i,1) , uv(i,0) , 0 ) ; } return clr / 255.0; } void process_frame( const SeqPosef& poses , const SeqMatd& vels , const int& t , const Matf& TRP , const Matf& iPRT ,const int& r , const int& c , Matf& vel , Matf& xyz , Matf& proj , Matf& uv , Matf& dep ) { vel = poses[t].o2w( vels[t].cl(3).toFloat() ); // proj = poses[t].w2o( vel ).appR1() * TRP; proj = vels[t].cl(3).toFloat().appR1() * TRP; filter_img( proj , r , c ); // dep = proj.c(2) , uv = proj.cl(2) / dep; // dep.AddRand( -0.5 , +0.5 ); // xyz = poses[t].o2w( ( ( uv.appR1() % dep ).appR1() * iPRT ).cl(3) ); dep = proj.c(2) , uv = proj.cl(2) / dep; dep.AddRand( 0 , +0.25 ); Matf xyz1 = poses[t].o2w( ( ( uv.appR1() % ( dep ) ).appR1() * iPRT ).cl(3) ); Matf xyz2 = poses[t].o2w( ( ( uv.appR1() % ( dep + 0.25 ) ).appR1() * iPRT ).cl(3) ); Matf xyz3 = poses[t].o2w( ( ( uv.appR1() % ( dep + 0.50 ) ).appR1() * iPRT ).cl(3) ); Matf xyz4 = poses[t].o2w( ( ( uv.appR1() % ( dep + 0.75 ) ).appR1() * iPRT ).cl(3) ); Matf xyz5 = poses[t].o2w( ( ( uv.appR1() % ( dep + 1.00 ) ).appR1() * iPRT ).cl(3) ); Matf xyz6 = poses[t].o2w( ( ( uv.appR1() % ( dep + 1.25 ) ).appR1() * iPRT ).cl(3) ); Matf xyz7 = poses[t].o2w( ( ( uv.appR1() % ( dep + 1.50 ) ).appR1() * iPRT ).cl(3) ); xyz = xyz1 | xyz2 | xyz3 | xyz4 | xyz5 | xyz6 | xyz7; } void create_images( const Matf& uv , const Matf& dep , Img1f& dspf , Img1c& dspc , const double& mlt = 3.0 ) { dspf.setVal( 0.0 ); forLOOPj( uv.r() ) { double val = mlt * dep(j); if( val > 255.0 ) val = 0.0; int v = std::floor( uv(j,1) ); int u = std::floor( uv(j,0) ); if( dspf(v,u) == 0 ) dspf(v,u) = val; else if( dspf(v,u) > val ) dspf(v,u) = val; } dspc = dspf.toUChar(); } void prep_dirs( const String& dir ) { String path; struct stat st = {0}; path = dir + "/proc"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc/imgs"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc/dispc"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc/dispf"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc/vel"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc/xyz"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc/uv"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); path = dir + "/proc/dep"; if( stat( path.c_str() , &st ) == -1 ) mkdir( path.c_str() , 0700 ); } void save_data( 
const String& suf , const String& dir , const String& path , const Img3c& img , const Img1f& dspf , const Img1c& dspc , const Matf& vel , const Matf& xyz , const Matf& uv , const Matf& dep ) { int nn = path.length() , n = 0; while( path[ nn - n ] != '/' ) n++; n--; String name = path.substr( nn - n , n -4 ); String sufname = suf + name; disp( sufname ); img.saveIMG( dir + "/proc/imgs/" + sufname + ".png" ); dspc.saveIMG( dir + "/proc/dispc/" + sufname + ".png" ); dspf.mat().saveBIN( dir + "/proc/dispf/" + sufname + ".bin" ); vel.saveBIN( dir + "/proc/vel/" + sufname + ".bin" ); xyz.saveBIN( dir + "/proc/xyz/" + sufname + ".bin" ); uv.saveBIN( dir + "/proc/uv/" + sufname + ".bin" ); dep.saveBIN( dir + "/proc/dep/" + sufname + ".bin" ); }
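load_pos above converts GPS latitude/longitude to local metric coordinates with a Mercator projection scaled by the cosine of the first frame's latitude. The stand-alone sketch below isolates just that formula; the sample coordinates are made up for illustration.

/* Stand-alone version of the lat/lon -> metric x/y conversion in load_pos(). */
#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#define EARTH_RADIUS 6378137.0   /* metres, as in load_pos() */

static void latlon_to_mercator(double lat, double lon, double scale,
                               double *x, double *y)
{
    *x = scale * EARTH_RADIUS * M_PI * lon / 180.0;
    *y = scale * EARTH_RADIUS * log(tan(M_PI * (90.0 + lat) / 360.0));
}

int main(void)
{
    double lat0 = 49.011, lon0 = 8.43;            /* made-up first GPS fix  */
    double scale = cos(lat0 * M_PI / 180.0);      /* fixed by first frame   */

    double x0, y0, x1, y1;
    latlon_to_mercator(lat0, lon0, scale, &x0, &y0);
    latlon_to_mercator(lat0 + 1e-4, lon0, scale, &x1, &y1); /* ~11 m north */

    printf("displacement: dx = %f m, dy = %f m\n", x1 - x0, y1 - y0);
    return 0;
}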
DRB031-truedepfirstdimension-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: [email protected], [email protected], [email protected], [email protected], [email protected]) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* There is a loop-carried true dependence within the outer level loop. Data race pair: b[i][j]@66:7 vs. b[i-1][j-1]@66:15 */ #include <stdlib.h> #include <stdio.h> int main(int argc, char* argv[]) { int i,j; int n=1000, m=1000; double b[1000][1000]; for (i=0; i<n; i++) for (j=0; j<m; j++) b[i][j] = 0.5; #pragma omp parallel for private(j) for (i=1;i<n;i++) for (j=1;j<m;j++) b[i][j]=b[i-1][j-1]; printf("b[500][500]=%f\n", b[500][500]); return 0; }
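For contrast, one way to respect the dependence (a sketch, not part of DataRaceBench) is to keep the outer i loop serial and parallelize only the inner loop: every read of b[i-1][j-1] then refers to a row that was completely written before row i starts, so no race remains.

/* Race-free variant of the kernel above; shown for contrast only. */
#include <stdio.h>

int main(void)
{
    int i, j;
    int n = 1000, m = 1000;
    static double b[1000][1000];

    for (i = 0; i < n; i++)
        for (j = 0; j < m; j++)
            b[i][j] = 0.5;

    /* Outer loop stays serial: row i-1 is complete before any thread writes
     * row i, so the true dependence b[i][j] <- b[i-1][j-1] is respected. */
    for (i = 1; i < n; i++) {
        #pragma omp parallel for
        for (j = 1; j < m; j++)
            b[i][j] = b[i-1][j-1];
    }

    printf("b[500][500]=%f\n", b[500][500]);
    return 0;
}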
reduction_team.c
#include <stdio.h> #include <omp.h> #define N 1000000ll #define SUM (N * (N-1)/2) void checkHost(int gpu_error, int* errors, long long a){ int host_error = 0; if (a != SUM){ printf ("Host - Incorrect result = %lld, expected = %lld!\n", a, SUM); host_error = 1; (*errors)++; } if(!host_error && !gpu_error){ printf("-----> Success\n"); } else{ printf("-----> Failure\n"); } } void reduction(int num_teams, const int num_threads, int* errors){ long long a = 0; int gpu_error = 0; #pragma omp target teams num_teams(num_teams) thread_limit(num_threads) map(tofrom: a) { long long i; #pragma omp parallel for reduction(+:a) for (i = 0; i < N; i++) { a += i; } if (a != SUM){ printf ("GPU - Incorrect result = %lld, expected = %lld!\n", a, SUM); gpu_error = 1; } } checkHost(gpu_error, errors, a); } void reduction_256(int num_teams, const int num_threads, int* errors){ long long a = 0; int gpu_error = 0; #pragma omp target teams num_teams(num_teams) thread_limit(256) map(tofrom: a) { long long i; #pragma omp parallel for reduction(+:a) for (i = 0; i < N; i++) { a += i; } if (a != SUM){ printf ("GPU - Incorrect result = %lld, expected = %lld!\n", a, SUM); gpu_error = 1; } } checkHost(gpu_error, errors, a); } void reduction_512(int num_teams, const int num_threads, int* errors){ long long a = 0; int gpu_error = 0; #pragma omp target teams num_teams(num_teams) thread_limit(512) map(tofrom: a) { long long i; #pragma omp parallel for reduction(+:a) for (i = 0; i < N; i++) { a += i; } if (a != SUM){ printf ("GPU - Incorrect result = %lld, expected = %lld!\n", a, SUM); gpu_error = 1; } } checkHost(gpu_error, errors, a); } void reduction_1024(int num_teams, const int num_threads, int* errors){ long long a = 0; int gpu_error = 0; #pragma omp target teams num_teams(num_teams) thread_limit(1024) map(tofrom: a) { long long i; #pragma omp parallel for reduction(+:a) for (i = 0; i < N; i++) { a += i; } if (a != SUM){ printf ("GPU - Incorrect result = %lld, expected = %lld!\n", a, SUM); gpu_error = 1; } } checkHost(gpu_error, errors, a); } int main (void) { int errors = 0; int gpu_error = 0; printf("\n---------- 1 Team with Variable Threads ----------\n"); printf("\nRunning 1 Team with 64 threads per team\n"); reduction(1, 64, &errors); printf("\nRunning 1 Team with 128 threads per team\n"); reduction(1, 128, &errors); //Have to call a different function to use a constant for num_threads because //a variable will not allow the num_threads to go above 256 printf("\nRunning 1 Team with 256 threads per team\n"); reduction_256(1, 256, &errors); printf("\nRunning 1 Team with 512 threads per team\n"); reduction_512(1, 512, &errors); printf("\nRunning 1 Team with 1024 threads per team\n"); reduction_1024(1, 1024, &errors); if(!errors){ printf("\nRESULT: ALL RUNS SUCCESSFUL!\n"); return 0; } else{ printf("\nRESULT: FAILURES OCCURED!\n"); return -1; } }
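The per-thread-limit copies above exist only because of the constant-argument restriction mentioned in main(); the same reduction can also be written once with the combined construct, as in the sketch below. Whether a given runtime honours thread_limit values above 256 in this form is implementation-dependent, so this is an illustrative alternative rather than a drop-in replacement for the test.

/* Sketch: one function covering all team/thread configurations. */
#include <stdio.h>

#define N 1000000ll
#define SUM (N * (N - 1) / 2)

long long team_sum(int num_teams, int num_threads)
{
    long long a = 0;
    #pragma omp target teams distribute parallel for \
            num_teams(num_teams) thread_limit(num_threads) \
            reduction(+:a) map(tofrom: a)
    for (long long i = 0; i < N; i++)
        a += i;
    return a;
}

int main(void)
{
    long long a = team_sum(1, 128);
    printf("sum = %lld (expected %lld)\n", a, SUM);
    return a == SUM ? 0 : -1;
}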