fci_graph.c
/* Copyright 2021 Google LLC

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <assert.h>

#include "fci_graph.h"
#include "bitstring.h"
#include "macros.h"
#include "binom.h"

void calculate_Z_matrix(int32_t *out, int norb, int nele)
{
#define NB_ 65
  uint64_t* binom = safe_malloc(binom, NB_ * NB_);
  initialize_binom(binom);

#pragma omp parallel for schedule(static) collapse(2)
  for (int km = 0; km < nele - 1; ++km) {
    for (int llm = 0; llm < norb - nele + 1; ++llm) {
      const int k = km + 1;
      const int ll = llm + k;
      int64_t tmp = 0;
      for (int m = norb - ll + 1; m < norb - k + 1; ++m) {
        tmp += binom[nele - k + NB_ * m] - binom[nele - k - 1 + NB_ * (m - 1)];
      }
      out[ll - 1 + norb * (k - 1)] = (int32_t)tmp;
    }
  }

  {
    int k = nele;
    for (int ll = nele; ll < norb + 1; ++ll) {
      out[ll - 1 + norb * (k - 1)] = ll - nele;
    }
  }
  free(binom);
#undef NB_
}

int map_deexc(int32_t *out,
              const int32_t *inp,
              const int lk,
              const int size,
              uint32_t *index,
              const int idx)
{
#pragma omp parallel for schedule(static)
  for (int i = 0; i < size; ++i) {
    const int target = inp[1 + i*3];
    int32_t *out3k = out + 3 * (index[target] + lk * target);
    out3k[0] = inp[0 + i*3];
    out3k[1] = idx;
    out3k[2] = inp[2 + i*3];
    index[target] += 1;
  }
  return 0;
}

void build_mapping_strings(int (**maps)[3],
                           int32_t *mapl,
                           const int32_t (*exc_deexc)[2],
                           const int nmaps,
                           const uint64_t *strings,
                           const int nstrings,
                           const bool count,
                           const int32_t *Z_matrix,
                           const int norb)
{
#pragma omp parallel for schedule(static)
  for (int mapno = 0; mapno < nmaps; ++mapno) {
    const int iorb = exc_deexc[mapno][0];
    const int jorb = exc_deexc[mapno][1];
    int (*cmap)[3] = count ? NULL : maps[mapno];
    int counter = 0;

    for (int stringno = 0; stringno < nstrings; ++stringno) {
      uint64_t cstring = strings[stringno];
      if (CHECK_BIT(cstring, jorb) && !CHECK_BIT(cstring, iorb)) {
        if (!count) {
          (*cmap)[0] = string_to_index(cstring, Z_matrix, norb);
          (*cmap)[1] = string_to_index(
              UNSET_BIT(SET_BIT(cstring, iorb), jorb), Z_matrix, norb);
          (*cmap)[2] = count_bits_between(cstring, iorb, jorb) % 2 == 0 ? 1 : -1;
          ++cmap;
        }
        ++counter;
      } else if (iorb == jorb && CHECK_BIT(cstring, iorb)) {
        if (!count) {
          const int cid = string_to_index(cstring, Z_matrix, norb);
          (*cmap)[0] = cid;
          (*cmap)[1] = cid;
          (*cmap)[2] = 1;
          ++cmap;
        }
        ++counter;
      }
    }

    if (count) {
      mapl[mapno] = counter;
    } else if (counter != mapl[mapno]) {
      fprintf(stderr, "Length of map %d not consistent.\n", mapno);
    }
  }
}

int string_to_index(uint64_t string, const int32_t *Z_matrix, const int norb)
{
  int occ = -1;
  int counter = 0;
  int id = 0;
  while (gbit_index(&string, &occ)) {
    id += Z_matrix[counter * norb + occ];
    ++counter;
  }
  return id;
}

void calculate_string_address(uint64_t *out,
                              const uint64_t *strings,
                              const int length,
                              const int32_t *Z_matrix,
                              const int norb)
{
#pragma omp parallel for schedule(static)
  for (int i = 0; i < length; ++i) {
    const uint64_t string = strings[i];
    const int address = string_to_index(string, Z_matrix, norb);
    out[address] = string;
  }
}

void map_to_deexc_alpha_icol(const int32_t ** mappings,
                             const int32_t * mapl,
                             const uint64_t * strings,
                             const int nstrings,
                             int32_t * exc,
                             int32_t * diag,
                             int32_t * index,
                             const int norb,
                             const int exc0,
                             const int exc1,
                             const int ldiag)
{
  int * alpha = safe_malloc(alpha, norb * nstrings);
  int * count = safe_calloc(count, norb);
  int * counter = safe_calloc(counter, norb * exc0);

  for (int i = 0; i < norb * nstrings; ++i) alpha[i] = -1;

  const uint64_t filled_string = ((uint64_t) 1 << norb) - (uint64_t) 1;
  for (int i = 0; i < nstrings; ++i) {
    uint64_t revstring = (~strings[i]) & filled_string;
    int icol = -1;
    while (gbit_index(&revstring, &icol)) {
      assert(icol < norb);
      alpha[icol * nstrings + i] = count[icol];
      index[icol * exc0 + count[icol]] = i;
      ++count[icol];
    }
  }

  for (int i = 0; i < norb; ++i) {
    assert(count[i] == exc0);
    count[i] = 0;
  }

#pragma omp parallel for schedule(dynamic)
  for (int i = 0; i < norb; ++i) {
    int32_t * c_exc = &exc[i * exc0 * exc1 * 3];
    int32_t * c_diag = &diag[i * ldiag];
    int * c_alpha = &alpha[i * nstrings];
    int * ccount = &count[i];
    int * counteri = &counter[i * exc0];
    for (int j = 0; j < norb; ++j) {
      const int32_t * mapping = mappings[i * norb + j];
      const int32_t nmaps = mapl[i * norb + j];
      if (i != j) {
        for (int mapid = 0; mapid < nmaps; ++mapid) {
          const int32_t source = mapping[3 * mapid + 0];
          const int32_t target = mapping[3 * mapid + 1];
          const int32_t parity = mapping[3 * mapid + 2];
          const int pos = c_alpha[target];
          assert(pos != -1);
          int * c_counter = &counteri[pos];
          int * cc_exc = &c_exc[(pos * exc1 + (*c_counter)) * 3];
          cc_exc[0] = source;
          cc_exc[1] = j;
          cc_exc[2] = parity;
          ++(*c_counter);
        }
      } else {
        for (int mapid = 0; mapid < nmaps; ++mapid) {
          assert(mapping[3 * mapid + 0] == mapping[3 * mapid + 1]);
          assert(mapping[3 * mapid + 2] == 1);
          c_diag[(*ccount)++] = mapping[3 * mapid + 1];
        }
      }
    }
  }
  free(alpha);
  free(count);
  free(counter);
}

int make_mapping_each(uint64_t *out,
                      const uint64_t *strings,
                      const int length,
                      const int32_t *dag,
                      const int dag_length,
                      const int32_t *undag,
                      const int undag_length)
{
  uint64_t dag_mask = 0;
  for (int i = 0; i != dag_length; ++i) {
    dag_mask = SET_BIT(dag_mask, dag[i]);
  }
  uint64_t undag_mask = 0;
  for (int i = 0; i != undag_length; ++i) {
    undag_mask = SET_BIT(undag_mask, undag[i]);
    dag_mask = UNSET_BIT(dag_mask, undag[i]);
  }

  int count = 0;
  for (int i = 0; i < length; ++i) {
    uint64_t current = strings[i];
    const uint64_t dag_masked = current & dag_mask;
    const uint64_t undag_masked = current & undag_mask;
    const bool check = !dag_masked && !(undag_masked ^ undag_mask);
    if (check) {
      int parity = 0;
      for (int j = undag_length-1; j >= 0; --j) {
        parity += count_bits_above(current, undag[j]);
        current = UNSET_BIT(current, undag[j]);
      }
      for (int j = dag_length-1; j >= 0; --j) {
        parity += count_bits_above(current, dag[j]);
        current = SET_BIT(current, dag[j]);
      }
      out[count * 3] = i;
      out[count * 3 + 1] = current;
      out[count * 3 + 2] = parity % 2;
      ++count;
    }
  }
  return count;
}

void make_mapping_each_set(uint64_t *down,
                           uint64_t *up,
                           const uint64_t *istrings,
                           const int length,
                           const int msize,
                           const int nsize,
                           const int dn,
                           const int norb)
{
  uint64_t* comb = safe_malloc(comb, msize);
  lexicographic_bitstring_generator(comb, norb, dn);

#pragma omp parallel for schedule(dynamic)
  for (int c = 0; c < msize; ++c) {
    const uint64_t mask = comb[c];
    int occ[16];
    assert(count_bits(mask) == dn && dn < 16);
    get_occupation(occ, mask);

    int count = 0;
    for (int i = 0; i != length; ++i) {
      const uint64_t source = istrings[i];
      if (((source & mask) ^ mask) == 0) {
        int parity = count_bits_above(source, occ[dn - 1]) * dn;
        uint64_t target = UNSET_BIT(source, occ[dn - 1]);
        for (int d = dn - 2; d >= 0; --d) {
          parity += (d + 1) * count_bits_between(source, occ[d], occ[d+1]);
          target = UNSET_BIT(target, occ[d]);
        }
        down[0 + 3 * (count + nsize * c)] = source;
        down[1 + 3 * (count + nsize * c)] = target;
        down[2 + 3 * (count + nsize * c)] = parity;
        up[0 + 3 * (count + nsize * c)] = target;
        up[1 + 3 * (count + nsize * c)] = source;
        up[2 + 3 * (count + nsize * c)] = parity;
        ++count;
      }
    }
  }
  free(comb);
}
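A note on fci_graph.c above: string_to_index() compresses an occupation bitstring (nele set bits among norb orbitals) into a contiguous 0-based address by summing one precomputed Z-matrix entry per electron, and calculate_string_address() uses that address to place each string in a dense array. The stand-alone sketch below illustrates the same addressing idea with a direct combinadic (binomial) rank instead of the precomputed Z matrix; the helper names (binom_small, string_rank) and the norb=4, nele=2 driver are invented for illustration, and the exact ordering produced by the library's Z matrix may differ from this simple rank.

/* Hypothetical stand-alone illustration of bitstring addressing; NOT part of fci_graph.c.
 * Each nele-bit string is ranked into 0..C(norb,nele)-1 with the combinadic formula
 * rank = sum_k C(p_k, k+1), where p_0 < p_1 < ... are the set-bit positions.
 * fci_graph.c precomputes offsets of this kind in its Z matrix instead. */
#include <stdio.h>
#include <stdint.h>

static uint64_t binom_small(int n, int k)   /* small binomial coefficient C(n,k) */
{
  if (k < 0 || k > n) return 0;
  uint64_t r = 1;
  for (int i = 1; i <= k; ++i) r = r * (uint64_t)(n - k + i) / (uint64_t)i;
  return r;
}

static int string_rank(uint64_t string)     /* analogous in spirit to string_to_index() */
{
  int rank = 0, electron = 0;
  for (int orb = 0; string != 0; ++orb, string >>= 1)
    if (string & 1ULL) rank += (int)binom_small(orb, ++electron);
  return rank;
}

int main(void)
{
  const int norb = 4, nele = 2;             /* toy case: C(4,2) = 6 strings */
  for (uint64_t s = 0; s < (1ULL << norb); ++s) {
    int nbits = 0;
    for (uint64_t t = s; t; t >>= 1) nbits += (int)(t & 1ULL);
    if (nbits != nele) continue;
    printf("string 0x%llx -> index %d\n", (unsigned long long)s, string_rank(s));
  }
  return 0;
}

Either way, each of the C(norb,nele) strings receives a distinct index in 0..C(norb,nele)-1, which is what lets calculate_string_address() scatter strings into a dense array without collisions.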
nco_s1d.c
/* $Header$ */ /* Purpose: NCO utilities for Sparse-1D (S1D) datasets */ /* Copyright (C) 2020--present Charlie Zender This file is part of NCO, the netCDF Operators. NCO is free software. You may redistribute and/or modify NCO under the terms of the 3-Clause BSD License with exceptions described in the LICENSE file */ #include "nco_s1d.h" /* Sparse-1D datasets */ const char * /* O [sng] String describing sparse-type */ nco_s1d_sng /* [fnc] Convert sparse-1D type enum to string */ (const nco_s1d_typ_enm nco_s1d_typ) /* I [enm] Sparse-1D type enum */ { /* Purpose: Convert sparse-type enum to string */ switch(nco_s1d_typ){ case nco_s1d_clm: return "Sparse Column (cols1d) format"; case nco_s1d_grd: return "Sparse Gridcell (grid1d) format"; case nco_s1d_lnd: return "Sparse Landunit (land1d) format"; case nco_s1d_pft: return "Sparse PFT (pfts1d) format" ; default: nco_dfl_case_generic_err(); break; } /* !nco_s1d_typ_enm */ /* Some compilers: e.g., SGI cc, need return statement to end non-void functions */ return (char *)NULL; } /* !nco_s1d_sng() */ int /* O [rcd] Return code */ nco_s1d_unpack /* [fnc] Unpack sparse-1D CLM/ELM variables into full file */ (rgr_sct * const rgr, /* I/O [sct] Regridding structure */ trv_tbl_sct * const trv_tbl) /* I/O [sct] Traversal Table */ { /* Purpose: Read sparse CLM/ELM input file, inflate and write into output file */ /* Usage: ncks -D 1 -O -C --s1d ~/data/bm/elm_mali_bg_hst.nc ~/foo.nc ncks -D 1 -O -C --s1d -v cols1d_topoglc --hrz=${DATA}/bm/elm_mali_ig_hst.nc ${DATA}/bm/elm_mali_rst.nc ~/foo.nc ncks -D 1 -O -C --s1d -v GPP,pfts1d_wtgcell ~/beth_in.nc ~/foo.nc ncremap --dbg=1 --vrb=3 --devnull=No --nco='--dbg=1' -P elm -m ${DATA}/maps/map_ne30np4_to_fv128x256_aave.20160301.nc ~/foo.nc ~/foo_rgr.nc */ const char fnc_nm[]="nco_s1d_unpack()"; /* [sng] Function name */ char *fl_in; char *fl_out; char *fl_tpl; /* [sng] Template file (contains horizontal grid) */ char dmn_nm[NC_MAX_NAME]; /* [sng] Dimension name */ char *grd_nm_in=(char *)strdup("gridcell"); char *lnd_nm_in=(char *)strdup("landunit"); char *clm_nm_in=(char *)strdup("column"); char *pft_nm_in=(char *)strdup("pft"); char *mec_nm_out=(char *)strdup("mec"); int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int in_id; /* I [id] Input netCDF file ID */ int md_open; /* [enm] Mode flag for nc_open() call */ int out_id; /* I [id] Output netCDF file ID */ int rcd=NC_NOERR; int tpl_id; /* [id] Input netCDF file ID (for horizontal grid template) */ long int clm_idx; long int grd_idx_out; long int idx_out; //long int lat_idx; //long int lon_idx; long int pft_idx; int dmn_idx; /* [idx] Dimension index */ /* Initialize local copies of command-line values */ dfl_lvl=rgr->dfl_lvl; fl_in=rgr->fl_in; fl_out=rgr->fl_out; in_id=rgr->in_id; out_id=rgr->out_id; /* Search for horizontal grid */ char *bnd_nm_in=rgr->bnd_nm; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ char *col_nm_in=rgr->col_nm_in; /* [sng] Name to recognize as input horizontal spatial dimension on unstructured grid */ char *lat_nm_in=rgr->lat_nm_in; /* [sng] Name of input dimension to recognize as latitude */ char *lon_nm_in=rgr->lon_nm_in; /* [sng] Name of input dimension to recognize as longitude */ int dmn_id_bnd_in=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_col_in=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_lat_in=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_lon_in=NC_MIN_INT; 
/* [id] Dimension ID */ nco_bool FL_RTR_RMT_LCN; nco_bool flg_grd_1D=False; /* [flg] Unpacked data are on unstructured (1D) grid */ nco_bool flg_grd_2D=False; /* [flg] Unpacked data are on rectangular (2D) grid */ nco_bool flg_grd_dat=False; /* [flg] Use horizontal grid from required input data file */ nco_bool flg_grd_tpl=False; /* [flg] Use horizontal grid from optional horizontal grid template file */ nco_bool flg_nm_hst=False; /* [flg] Names in data file are as in history files ("ltype_"...) */ nco_bool flg_nm_rst=False; /* [flg] Names in data file are as in restart files ("ilun_"...) */ /* Does data file have unstructured grid? MB: Routine must handle two semantically distinct meanings of "column": 1. The horizontal dimension in an unstructured grid 2. A fraction of a landunit, which is a fraction of a CTSM/ELM gridcell In particular, a column is a fraction of a vegetated, urban, glacier, or crop landunit This routine distinguishes these meanings by abbreviating (1) as "col" and (2) as "clm" This usage maintains the precedent that "col" is the horizontal unstructured dimension in nco_rgr.c It is necessary though unintuitive that "cols1d" variable metadata will use the "clm" abbreviation */ if(col_nm_in && (rcd=nco_inq_dimid_flg(in_id,col_nm_in,&dmn_id_col_in)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"lndgrid",&dmn_id_col_in)) == NC_NOERR) col_nm_in=strdup("lndgrid"); /* CLM */ if(dmn_id_col_in != NC_MIN_INT) flg_grd_1D=True; /* Does data file have RLL grid? */ if(!flg_grd_1D){ if(lat_nm_in && (rcd=nco_inq_dimid_flg(in_id,lat_nm_in,&dmn_id_lat_in)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"latitude",&dmn_id_lat_in)) == NC_NOERR) lat_nm_in=strdup("lndgrid"); /* CF */ if(lon_nm_in && (rcd=nco_inq_dimid_flg(in_id,lon_nm_in,&dmn_id_lon_in)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(in_id,"longitude",&dmn_id_lon_in)) == NC_NOERR) lon_nm_in=strdup("lndgrid"); /* CF */ } /* !flg_grd_1D */ if(dmn_id_lat_in != NC_MIN_INT && dmn_id_lon_in != NC_MIN_INT) flg_grd_2D=True; /* Set where to obtain horizontal grid */ if(flg_grd_1D || flg_grd_2D) flg_grd_dat=True; else flg_grd_tpl=True; if(flg_grd_tpl && !rgr->fl_hrz){ (void)fprintf(stderr,"%s: ERROR %s did not locate horizontal grid in input data file and no optional horizontal gridfile was provided.\nHINT: Use option --hrz to specify file with horizontal grid used by input data.\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !flg_grd_tpl */ /* Open grid template file iff necessary */ if(flg_grd_tpl && rgr->fl_hrz){ char *fl_pth_lcl=NULL; nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_OPEN=rgr->flg_uio; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ /* Duplicate (because nco_fl_mk_lcl() free()'s its fl_in) */ fl_tpl=(char *)strdup(rgr->fl_hrz); /* Make sure file is on local system and is readable or die trying */ fl_tpl=nco_fl_mk_lcl(fl_tpl,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_tpl,md_open,&bfr_sz_hnt,&tpl_id); /* Same logic used to search for grid in data file and to search for grid in template file... Does template file have unstructured grid? 
*/ if(col_nm_in && (rcd=nco_inq_dimid_flg(tpl_id,col_nm_in,&dmn_id_col_in)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(tpl_id,"lndgrid",&dmn_id_col_in)) == NC_NOERR) col_nm_in=strdup("lndgrid"); /* CLM */ if(dmn_id_col_in != NC_MIN_INT) flg_grd_1D=True; /* Does template file have RLL grid? */ if(!flg_grd_1D){ if(lat_nm_in && (rcd=nco_inq_dimid_flg(tpl_id,lat_nm_in,&dmn_id_lat_in)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(tpl_id,"latitude",&dmn_id_lat_in)) == NC_NOERR) lat_nm_in=strdup("lndgrid"); /* CF */ if(lon_nm_in && (rcd=nco_inq_dimid_flg(tpl_id,lon_nm_in,&dmn_id_lon_in)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(tpl_id,"longitude",&dmn_id_lon_in)) == NC_NOERR) lon_nm_in=strdup("lndgrid"); /* CF */ } /* !flg_grd_1D */ if(dmn_id_lat_in != NC_MIN_INT && dmn_id_lon_in != NC_MIN_INT) flg_grd_2D=True; /* Set where to obtain horizontal grid */ if(!flg_grd_1D && !flg_grd_2D){ (void)fprintf(stderr,"%s: ERROR %s did not locate horizontal grid in input data file %s or in template file %s.\nHINT: One of those files must contain the grid dimensions and coordinates used by the packed data in the input data file.\n",nco_prg_nm_get(),fnc_nm,fl_in,fl_tpl); nco_exit(EXIT_FAILURE); } /* !flg_grd_1D */ } /* !flg_grd_tpl */ int cols1d_gridcell_index_id=NC_MIN_INT; /* [id] Gridcell index of column */ int cols1d_ixy_id=NC_MIN_INT; /* [id] Column 2D longitude index */ int cols1d_jxy_id=NC_MIN_INT; /* [id] Column 2D latitude index */ int cols1d_lat_id=NC_MIN_INT; /* [id] Column latitude */ int cols1d_lon_id=NC_MIN_INT; /* [id] Column longitude */ int cols1d_ityp_id=NC_MIN_INT; /* [id] Column type */ int cols1d_ityplun_id=NC_MIN_INT; /* [id] Column landunit type */ int grid1d_ixy_id=NC_MIN_INT; /* [id] Gridcell 2D longitude index */ int grid1d_jxy_id=NC_MIN_INT; /* [id] Gridcell 2D latitude index */ int grid1d_lat_id=NC_MIN_INT; /* [id] Gridcell latitude */ int grid1d_lon_id=NC_MIN_INT; /* [id] Gridcell longitude */ int land1d_gridcell_index_id=NC_MIN_INT; /* [id] Gridcell index of landunit */ int land1d_ixy_id=NC_MIN_INT; /* [id] Landunit 2D longitude index */ int land1d_jxy_id=NC_MIN_INT; /* [id] Landunit 2D latitude index */ int land1d_lat_id=NC_MIN_INT; /* [id] Landunit latitude */ int land1d_lon_id=NC_MIN_INT; /* [id] Landunit longitude */ int pfts1d_column_index_id=NC_MIN_INT; /* [id] Column index of PFT */ int pfts1d_gridcell_index_id=NC_MIN_INT; /* [id] Gridcell index of PFT */ int pfts1d_ityp_veg_id=NC_MIN_INT; /* [id] PFT vegetation type */ int pfts1d_ityplun_id=NC_MIN_INT; /* [id] PFT landunit type */ int pfts1d_ixy_id=NC_MIN_INT; /* [id] PFT 2D longitude index */ int pfts1d_jxy_id=NC_MIN_INT; /* [id] PFT 2D latitude index */ int pfts1d_lat_id=NC_MIN_INT; /* [id] PFT latitude */ int pfts1d_lon_id=NC_MIN_INT; /* [id] PFT longitude */ //int pfts1d_wtgcell_id=NC_MIN_INT; /* [id] PFT weight relative to corresponding gridcell */ int dmn_id_clm_in=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_grd_in=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_lnd_in=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_pft_in=NC_MIN_INT; /* [id] Dimension ID */ nco_bool flg_s1d_clm=False; /* [flg] Dataset contains sparse variables for columns */ nco_bool flg_s1d_grd=False; /* [flg] Dataset contains sparse variables for gridcells */ nco_bool flg_s1d_lnd=False; /* [flg] Dataset contains sparse variables for landunits */ nco_bool flg_s1d_pft=False; /* [flg] Dataset contains sparse variables for PFTs */ 
rcd=nco_inq_att_flg(in_id,NC_GLOBAL,"ilun_vegetated_or_bare_soil",(nc_type *)NULL,(long *)NULL); if(rcd == NC_NOERR) flg_nm_rst=True; rcd=nco_inq_att_flg(in_id,NC_GLOBAL,"ltype_vegetated_or_bare_soil",(nc_type *)NULL,(long *)NULL); if(rcd == NC_NOERR) flg_nm_hst=True; assert(!(flg_nm_hst && flg_nm_rst)); if(!flg_nm_hst && !flg_nm_rst){ (void)fprintf(stderr,"%s: ERROR %s reports input data file lacks expected global attributes\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !flg_nm_hst */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO %s will assume input attributes and variables use CLM/ELM %s naming conventions like %s\n",nco_prg_nm_get(),fnc_nm,flg_nm_hst ? "history file" : "restart file",flg_nm_hst ? "\"ltype_...\"" : "\"ilun_...\""); rcd=nco_inq_varid_flg(in_id,"cols1d_lat",&cols1d_lat_id); if(cols1d_lat_id != NC_MIN_INT) flg_s1d_clm=True; if(flg_s1d_clm){ rcd=nco_inq_varid(in_id,"cols1d_ixy",&cols1d_ixy_id); rcd=nco_inq_varid(in_id,"cols1d_jxy",&cols1d_jxy_id); rcd=nco_inq_varid(in_id,"cols1d_lon",&cols1d_lon_id); rcd=nco_inq_varid_flg(in_id,"cols1d_gridcell_index",&cols1d_gridcell_index_id); /* ELM/MALI restart */ rcd=nco_inq_varid_flg(in_id,"cols1d_ityp",&cols1d_ityp_id); /* ELM/MALI restart */ if(flg_nm_hst) rcd=nco_inq_varid(in_id,"cols1d_itype_lunit",&cols1d_ityplun_id); else rcd=nco_inq_varid(in_id,"cols1d_ityplun",&cols1d_ityplun_id); } /* !flg_s1d_clm */ rcd=nco_inq_varid_flg(in_id,"grid1d_lat",&grid1d_lat_id); if(grid1d_lat_id != NC_MIN_INT) flg_s1d_grd=True; if(flg_s1d_grd){ rcd=nco_inq_varid(in_id,"grid1d_ixy",&grid1d_ixy_id); rcd=nco_inq_varid(in_id,"grid1d_jxy",&grid1d_jxy_id); rcd=nco_inq_varid(in_id,"grid1d_lon",&grid1d_lon_id); } /* !flg_s1d_grd */ rcd=nco_inq_varid_flg(in_id,"land1d_lat",&land1d_lat_id); if(land1d_lat_id != NC_MIN_INT) flg_s1d_lnd=True; if(flg_s1d_lnd){ rcd=nco_inq_varid_flg(in_id,"land1d_gridcell_index",&land1d_gridcell_index_id); rcd=nco_inq_varid(in_id,"land1d_ixy",&land1d_ixy_id); rcd=nco_inq_varid(in_id,"land1d_jxy",&land1d_jxy_id); rcd=nco_inq_varid(in_id,"land1d_lon",&land1d_lon_id); } /* !flg_s1d_lnd */ rcd=nco_inq_varid_flg(in_id,"pfts1d_lat",&pfts1d_lat_id); if(pfts1d_lat_id != NC_MIN_INT) flg_s1d_pft=True; if(flg_s1d_pft){ rcd=nco_inq_varid(in_id,"pfts1d_ixy",&pfts1d_ixy_id); rcd=nco_inq_varid(in_id,"pfts1d_jxy",&pfts1d_jxy_id); rcd=nco_inq_varid(in_id,"pfts1d_lon",&pfts1d_lon_id); rcd=nco_inq_varid_flg(in_id,"pfts1d_column_index",&pfts1d_column_index_id); rcd=nco_inq_varid_flg(in_id,"pfts1d_gridcell_index",&pfts1d_gridcell_index_id); //if(flg_nm_hst) rcd=nco_inq_varid(in_id,"pfts1d_wtgcell",&pfts1d_wtgcell_id); else rcd=nco_inq_varid(in_id,"pfts1d_wtxy",&pfts1d_wtgcell_id); if(flg_nm_hst) rcd=nco_inq_varid(in_id,"pfts1d_itype_lunit",&pfts1d_ityplun_id); else rcd=nco_inq_varid(in_id,"pfts1d_ityplun",&pfts1d_ityplun_id); if(flg_nm_hst) rcd=nco_inq_varid(in_id,"pfts1d_itype_veg",&pfts1d_ityp_veg_id); else rcd=nco_inq_varid(in_id,"pfts1d_itypveg",&pfts1d_ityp_veg_id); } /* !flg_s1d_pft */ if(!(flg_s1d_clm || flg_s1d_lnd || flg_s1d_pft)){ (void)fprintf(stderr,"%s: ERROR %s does not detect any of the key variables (currently cols1d_lat, land1d_lat, pfts1d_lat) used to indicate presence of sparse-packed (S1D) variables\nHINT: Be sure the target dataset (file) contains S1D variables---not all CLM/ELM history (as opposed to restart) files do\n",nco_prg_nm_get(),fnc_nm); nco_exit(EXIT_FAILURE); } /* !flg_s1d_clm... 
*/ if(flg_s1d_clm) rcd=nco_inq_dimid(in_id,clm_nm_in,&dmn_id_clm_in); if(flg_s1d_grd) rcd=nco_inq_dimid(in_id,grd_nm_in,&dmn_id_grd_in); if(flg_s1d_lnd) rcd=nco_inq_dimid(in_id,lnd_nm_in,&dmn_id_lnd_in); if(flg_s1d_pft) rcd=nco_inq_dimid(in_id,pft_nm_in,&dmn_id_pft_in); if(nco_dbg_lvl_get() >= nco_dbg_std){ (void)fprintf(stderr,"%s: INFO %s necessary information to unpack cols1d variables\n",nco_prg_nm_get(),flg_s1d_clm ? "Found all" : "Could not find"); (void)fprintf(stderr,"%s: INFO %s necessary information to unpack land1d variables\n",nco_prg_nm_get(),flg_s1d_lnd ? "Found all" : "Could not find"); (void)fprintf(stderr,"%s: INFO %s necessary information to unpack pfts1d variables\n",nco_prg_nm_get(),flg_s1d_pft ? "Found all" : "Could not find"); } /* !dbg */ /* Collect other information from data and template files */ int dmn_nbr_in; /* [nbr] Number of dimensions in input file */ int dmn_nbr_out; /* [nbr] Number of dimensions in output file */ int var_nbr; /* [nbr] Number of variables in file */ rcd=nco_inq(in_id,&dmn_nbr_in,&var_nbr,(int *)NULL,(int *)NULL); const unsigned int trv_nbr=trv_tbl->nbr; /* [idx] Number of traversal table entries */ int var_cpy_nbr=0; /* [nbr] Number of copied variables */ int var_rgr_nbr=0; /* [nbr] Number of unpacked variables */ int var_xcl_nbr=0; /* [nbr] Number of deleted variables */ int var_crt_nbr=0; /* [nbr] Number of created variables */ //long idx; /* [idx] Generic index */ unsigned int idx_tbl; /* [idx] Counter for traversal table */ char *dmn_nm_cp; /* [sng] Dimension name as char * to reduce indirection */ nco_bool has_clm; /* [flg] Contains column dimension */ nco_bool has_grd; /* [flg] Contains gridcell dimension */ nco_bool has_lnd; /* [flg] Contains landunit dimension */ nco_bool has_pft; /* [flg] Contains PFT dimension */ nco_bool need_clm=False; /* [flg] At least one variable to unpack needs column dimension */ nco_bool need_grd=False; /* [flg] At least one variable to unpack needs gridcell dimension */ nco_bool need_lnd=False; /* [flg] At least one variable to unpack needs landunit dimension */ nco_bool need_mec=False; /* [flg] At least one variable to unpack needs MEC dimension */ nco_bool need_pft=False; /* [flg] At least one variable to unpack needs PFT dimension */ trv_sct trv; /* [sct] Traversal table object structure to reduce indirection */ /* Define unpacking flag for each variable */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ dmn_nbr_in=trv_tbl->lst[idx_tbl].nbr_dmn; has_clm=has_grd=has_lnd=has_pft=False; for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ /* Pre-determine flags necessary during next loop */ dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm; if(!has_clm && clm_nm_in) has_clm=!strcmp(dmn_nm_cp,clm_nm_in); if(!has_grd && grd_nm_in) has_grd=!strcmp(dmn_nm_cp,grd_nm_in); if(!has_lnd && lnd_nm_in) has_lnd=!strcmp(dmn_nm_cp,lnd_nm_in); if(!has_pft && pft_nm_in) has_pft=!strcmp(dmn_nm_cp,pft_nm_in); } /* !dmn_idx */ /* Unpack variables that contain a sparse-1D dimension */ if(has_clm || has_grd || has_lnd || has_pft){ trv_tbl->lst[idx_tbl].flg_rgr=True; var_rgr_nbr++; if(has_clm) need_clm=True; if(has_grd) need_grd=True; if(has_lnd) need_lnd=True; if(has_pft) need_pft=True; } /* endif */ /* Copy all variables that are not regridded or omitted */ if(!trv_tbl->lst[idx_tbl].flg_rgr) var_cpy_nbr++; } /* end nco_obj_typ_var */ } /* end idx_tbl */ if(!var_rgr_nbr) (void)fprintf(stdout,"%s: WARNING %s reports no variables fit unpacking criteria. 
The sparse data unpacker expects at least one variable to unpack, and variables not unpacked are copied straight to output. HINT: If the name(s) of the input sparse-1D dimensions (e.g., \"column\", \"landunit\", and \"pft\") do not match NCO's preset defaults (case-insensitive unambiguous forms and abbreviations of \"column\", \"landunit\", and/or \"pft\", respectively) then change the dimension names that NCO looks for. Instructions are at http://nco.sf.net/nco.html#sparse. For CTSM/ELM sparse-1D coordinate grids, the \"column\", \"landunit\", and \"pft\" variable names can be set with, e.g., \"ncks --rgr column_nm=clm#landunit_nm=lnd#pft_nm=pft\" or \"ncremap -R '--rgr clm=clm#lnd=lnd#pft=pft'\".\n",nco_prg_nm_get(),fnc_nm); if(nco_dbg_lvl_get() >= nco_dbg_fl){ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr) (void)fprintf(stderr,"Unpack %s? %s\n",trv.nm,trv.flg_rgr ? "Yes" : "No"); } /* end idx_tbl */ } /* end dbg */ long clm_nbr_in=NC_MIN_INT; /* [nbr] Number of columns in input data */ long grd_nbr_in=NC_MIN_INT; /* [nbr] Number of gridcells in input data */ long lnd_nbr_in=NC_MIN_INT; /* [nbr] Number of landunits in input data */ long pft_nbr_in=NC_MIN_INT; /* [nbr] Number of PFTs in input data */ long clm_nbr_out=NC_MIN_INT; /* [nbr] Number of columns in output data */ long grd_nbr_out=NC_MIN_INT; /* [nbr] Number of gridcells in output data */ long lnd_nbr_out=NC_MIN_INT; /* [nbr] Number of landunits in output data */ long mec_nbr_out=NC_MIN_INT; /* [nbr] Number of MECs in output data */ long pft_nbr_out=NC_MIN_INT; /* [nbr] Number of PFTs in output data */ if(need_clm) rcd=nco_inq_dimlen(in_id,dmn_id_clm_in,&clm_nbr_in); if(need_grd) rcd=nco_inq_dimlen(in_id,dmn_id_grd_in,&grd_nbr_in); if(need_lnd) rcd=nco_inq_dimlen(in_id,dmn_id_lnd_in,&lnd_nbr_in); if(need_pft) rcd=nco_inq_dimlen(in_id,dmn_id_pft_in,&pft_nbr_in); int hrz_id; /* [id] Horizontal grid netCDF file ID */ long bnd_nbr=int_CEWI; /* [nbr] Number of boundaries for output time and rectangular grid coordinates, and number of vertices for output non-rectangular grid coordinates */ long col_nbr; /* [nbr] Number of columns */ long lon_nbr; /* [nbr] Number of longitudes */ long lat_nbr; /* [nbr] Number of latitudes */ size_t grd_sz_in; /* [nbr] Number of elements in single layer of input grid */ size_t grd_sz_out; /* [nbr] Number of elements in single layer of output grid */ if(flg_grd_dat) hrz_id=in_id; else hrz_id=tpl_id; /* Locate bounds dimension, if any, in file containing horizontal grid */ if(bnd_nm_in && (rcd=nco_inq_dimid_flg(hrz_id,bnd_nm_in,&dmn_id_bnd_in)) == NC_NOERR) /* do nothing */; else if((rcd=nco_inq_dimid_flg(hrz_id,"nv",&dmn_id_bnd_in)) == NC_NOERR) bnd_nm_in=strdup("nv"); /* fxm */ else if((rcd=nco_inq_dimid_flg(hrz_id,"nvertices",&dmn_id_bnd_in)) == NC_NOERR) bnd_nm_in=strdup("nvertices"); /* CICE */ else if((rcd=nco_inq_dimid_flg(hrz_id,"maxEdges",&dmn_id_bnd_in)) == NC_NOERR) bnd_nm_in=strdup("maxEdges"); /* MPAS */ if(flg_grd_1D) rcd=nco_inq_dimlen(hrz_id,dmn_id_col_in,&col_nbr); if(flg_grd_2D){ rcd=nco_inq_dimlen(hrz_id,dmn_id_lat_in,&lat_nbr); rcd=nco_inq_dimlen(hrz_id,dmn_id_lon_in,&lon_nbr); } /* !flg_grd_2D */ if(dmn_id_bnd_in != NC_MIN_INT) rcd=nco_inq_dimlen(hrz_id,dmn_id_bnd_in,&bnd_nbr); if(grd_nbr_in != NC_MIN_INT){ grd_sz_in=grd_nbr_in; }else{ grd_sz_in= flg_grd_1D ? col_nbr : lat_nbr*lon_nbr; } /* !grd_nbr_in */ grd_sz_out= flg_grd_1D ? 
col_nbr : lat_nbr*lon_nbr; /* Lay-out unpacked file */ char *bnd_nm_out=NULL; char *col_nm_out=NULL; char *lat_nm_out=NULL; char *lon_nm_out=NULL; char *lat_dmn_nm_out; char *lon_dmn_nm_out; int dmn_id_bnd_out=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_col_out=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_lat_out=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_lon_out=NC_MIN_INT; /* [id] Dimension ID */ if(rgr->bnd_nm) bnd_nm_out=rgr->bnd_nm; else bnd_nm_out=bnd_nm_in; if(rgr->col_nm_out) col_nm_out=rgr->col_nm_out; else col_nm_out=col_nm_in; if(rgr->lat_dmn_nm) lat_dmn_nm_out=rgr->lat_dmn_nm; else lat_dmn_nm_out=lat_nm_in; if(rgr->lon_dmn_nm) lon_dmn_nm_out=rgr->lon_dmn_nm; else lon_dmn_nm_out=lon_nm_in; if(rgr->lat_nm_out) lat_nm_out=rgr->lat_nm_out; else lat_nm_out=lat_nm_in; if(rgr->lon_nm_out) lon_nm_out=rgr->lon_nm_out; else lon_nm_out=lon_nm_in; /* Define horizontal dimensions before all else */ if(flg_grd_1D){ rcd=nco_def_dim(out_id,col_nm_out,col_nbr,&dmn_id_col_out); } /* !flg_grd_1D */ if(flg_grd_2D){ rcd=nco_def_dim(out_id,lat_nm_out,lat_nbr,&dmn_id_lat_out); rcd=nco_def_dim(out_id,lon_nm_out,lon_nbr,&dmn_id_lon_out); } /* !flg_grd_2D */ if(dmn_id_bnd_in != NC_MIN_INT) rcd=nco_def_dim(out_id,bnd_nm_out,bnd_nbr,&dmn_id_bnd_out); char *clm_nm_out=NULL; char *grd_nm_out=NULL; char *lnd_nm_out=NULL; char *pft_nm_out=NULL; if(need_clm) clm_nm_out=(char *)strdup(clm_nm_in); if(need_grd) grd_nm_out=(char *)strdup(grd_nm_in); if(need_lnd) lnd_nm_out=(char *)strdup(lnd_nm_in); if(need_pft) pft_nm_out=(char *)strdup(pft_nm_in); int dmn_id_clm_out=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_lnd_out=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_mec_out=NC_MIN_INT; /* [id] Dimension ID */ int dmn_id_pft_out=NC_MIN_INT; /* [id] Dimension ID */ /* fxm: make an ilun enumerated type? 
*/ int ilun_vegetated_or_bare_soil; /* 1 [enm] */ int ilun_crop; /* 2 [enm] */ int ilun_landice; /* 3 [enm] */ int ilun_landice_multiple_elevation_classes; /* 4 [enm] */ int ilun_deep_lake; /* 5 [enm] */ int ilun_wetland; /* 6 [enm] */ int ilun_urban_tbd; /* 7 [enm] */ int ilun_urban_hd; /* 8 [enm] */ int ilun_urban_md; /* 9 [enm] */ if(flg_nm_hst){ rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_vegetated_or_bare_soil",&ilun_vegetated_or_bare_soil,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_crop",&ilun_crop,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_landice",&ilun_landice,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_landice_multiple_elevation_classes",&ilun_landice_multiple_elevation_classes,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_deep_lake",&ilun_deep_lake,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_wetland",&ilun_wetland,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_urban_tbd",&ilun_urban_tbd,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_urban_hd",&ilun_urban_hd,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ltype_urban_md",&ilun_urban_md,NC_INT); }else{ /* !flg_nm_hst */ rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_vegetated_or_bare_soil",&ilun_vegetated_or_bare_soil,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_crop",&ilun_crop,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_landice",&ilun_landice,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_landice_multiple_elevation_classes",&ilun_landice_multiple_elevation_classes,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_deep_lake",&ilun_deep_lake,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_wetland",&ilun_wetland,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_urban_tbd",&ilun_urban_tbd,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_urban_hd",&ilun_urban_hd,NC_INT); rcd=nco_get_att(in_id,NC_GLOBAL,"ilun_urban_md",&ilun_urban_md,NC_INT); } /* !flg_nm_hst */ /* Determine output Column dimension if needed */ int *cols1d_ityp=NULL; /* [id] Column type */ int *cols1d_ityplun=NULL; /* [id] Column landunit type */ if(need_clm){ if(cols1d_ityp_id != NC_MIN_INT) cols1d_ityp=(int *)nco_malloc(clm_nbr_in*sizeof(int)); cols1d_ityplun=(int *)nco_malloc(clm_nbr_in*sizeof(int)); if(cols1d_ityp_id != NC_MIN_INT) rcd=nco_get_var(in_id,cols1d_ityp_id,cols1d_ityp,NC_INT); rcd=nco_get_var(in_id,cols1d_ityplun_id,cols1d_ityplun,NC_INT); mec_nbr_out=0; for(clm_idx=0;clm_idx<clm_nbr_in;clm_idx++){ if(cols1d_ityplun[clm_idx] != ilun_landice_multiple_elevation_classes) continue; while(cols1d_ityplun[clm_idx++] == ilun_landice_multiple_elevation_classes) mec_nbr_out++; break; } /* !clm_idx */ /* NB: landice_MEC (ilun=4, usually) landunits have 10 (always, AFAICT) glacier elevation classes */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO mec_nbr_out = %ld\n",nco_prg_nm_get(),mec_nbr_out); } /* !need_clm */ /* Determine output Grid dimension if needed: CLM/ELM 'gridcell' dimension counts each gridcell that contains land Replace this dimension by horizontal dimension(s) in input data file */ if(need_clm){ if(flg_grd_1D) grd_nbr_out=col_nbr; if(flg_grd_2D) grd_nbr_out=lat_nbr*lon_nbr; if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO grd_nbr_out = %ld\n",nco_prg_nm_get(),grd_nbr_out); } /* !need_grd */ /* Determine output Landunit dimension if needed */ if(need_lnd){ lnd_nbr_out=3; /* fxm: Based on TBUILD variable for 3 urban landunit types */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO lnd_nbr_out = %ld\n",nco_prg_nm_get(),lnd_nbr_out); } /* !need_lnd */ /* Determine output PFT dimension if needed 
*/ //double *pfts1d_wtgcell=NULL; /* [id] PFT weight relative to corresponding gridcell */ int *pfts1d_ityp_veg=NULL; /* [id] PFT vegetation type */ int *pfts1d_ityplun=NULL; /* [id] PFT landunit type */ int *pfts1d_ixy=NULL; /* [id] PFT 2D longitude index */ int *pfts1d_jxy=NULL; /* [id] PFT 2D latitude index */ int pft_typ; /* [enm] PFT type */ if(need_pft){ //pfts1d_wtgcell=(double *)nco_malloc(pft_nbr_in*sizeof(double)); pfts1d_ityp_veg=(int *)nco_malloc(pft_nbr_in*sizeof(int)); pfts1d_ityplun=(int *)nco_malloc(pft_nbr_in*sizeof(int)); //rcd=nco_get_var(in_id,pfts1d_wtgcell_id,pfts1d_wtgcell,NC_DOUBLE); rcd=nco_get_var(in_id,pfts1d_ityp_veg_id,pfts1d_ityp_veg,NC_INT); rcd=nco_get_var(in_id,pfts1d_ityplun_id,pfts1d_ityplun,NC_INT); pft_nbr_out=0; for(pft_idx=0;pft_idx<pft_nbr_in;pft_idx++){ if((pfts1d_ityplun[pft_idx] != ilun_vegetated_or_bare_soil) && (pfts1d_ityplun[pft_idx] != ilun_crop)) continue; /* Skip bare ground */ while(pfts1d_ityp_veg[++pft_idx] != 0) pft_nbr_out++; break; } /* !pft_idx */ if(nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: INFO pft_nbr_out = %ld\n",nco_prg_nm_get(),pft_nbr_out); pfts1d_ixy=(int *)nco_malloc(pft_nbr_in*sizeof(int)); rcd=nco_get_var(in_id,pfts1d_ixy_id,pfts1d_ixy,NC_INT); if(flg_grd_2D){ pfts1d_jxy=(int *)nco_malloc(pft_nbr_in*sizeof(int)); rcd=nco_get_var(in_id,pfts1d_jxy_id,pfts1d_jxy,NC_INT); } /* !flg_grd_2D */ } /* !need_pft */ /* Define unpacked versions of needed dimensions before all else */ (void)fprintf(stdout,"%s: DEBUG quark1\n",nco_prg_nm_get()); if(need_clm && clm_nbr_out > 0L) rcd=nco_def_dim(out_id,clm_nm_out,clm_nbr_out,&dmn_id_clm_out); if(need_lnd && lnd_nbr_out > 0L) rcd=nco_def_dim(out_id,lnd_nm_out,lnd_nbr_out,&dmn_id_lnd_out); if(need_pft && pft_nbr_out > 0L) rcd=nco_def_dim(out_id,pft_nm_out,pft_nbr_out,&dmn_id_pft_out); /* Assume MECs are new output dimension if they are enumerated in input */ if(mec_nbr_out > 0L) rcd=nco_def_dim(out_id,mec_nm_out,mec_nbr_out,&dmn_id_mec_out); /* Pre-allocate dimension ID and cnt/srt space */ char *var_nm; /* [sng] Variable name */ int *dmn_ids_in=NULL; /* [id] Dimension IDs */ int *dmn_ids_out=NULL; /* [id] Dimension IDs */ int dmn_nbr_max; /* [nbr] Maximum number of dimensions variable can have in input or output */ int var_id_in; /* [id] Variable ID */ int var_id_out; /* [id] Variable ID */ long *dmn_cnt_in=NULL; long *dmn_cnt_out=NULL; long *dmn_srt=NULL; nc_type var_typ; /* [enm] Variable type (same for input and output variable) */ nco_bool PCK_ATT_CPY=True; /* [flg] Copy attributes "scale_factor", "add_offset" */ int dmn_in_fst; /* [idx] Offset of input- relative to output-dimension due to non-MRV dimension insertion */ int dmn_nbr_rec; /* [nbr] Number of unlimited dimensions */ int *dmn_ids_rec=NULL; /* [id] Unlimited dimension IDs */ rcd+=nco_inq_ndims(in_id,&dmn_nbr_max); dmn_ids_in=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); dmn_ids_out=(int *)nco_malloc(dmn_nbr_max*sizeof(int)); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* Obtain record dimension information from data file (restart files have no time dimension) */ rcd+=nco_inq_unlimdims(in_id,&dmn_nbr_rec,(int *)NULL); if(dmn_nbr_rec > 0){ dmn_ids_rec=(int *)nco_malloc(dmn_nbr_rec*sizeof(int)); 
rcd=nco_inq_unlimdims(in_id,&dmn_nbr_rec,dmn_ids_rec); } /* !dmn_nbr_rec */ int shuffle; /* [flg] Turn-on shuffle filter */ int deflate; /* [flg] Turn-on deflate filter */ deflate=(int)True; shuffle=NC_SHUFFLE; dfl_lvl=rgr->dfl_lvl; fl_out_fmt=rgr->fl_out_fmt; //const int dmn_nbr_0D=0; /* [nbr] Rank of 0-D grid variables (scalars) */ const int dmn_nbr_1D=1; /* [nbr] Rank of 1-D grid variables */ const int dmn_nbr_2D=2; /* [nbr] Rank of 2-D grid variables */ nc_type crd_typ_in; nc_type crd_typ_out; /* Required grid variables */ int lat_in_id; /* [id] Variable ID for latitude */ int lat_out_id; /* [id] Variable ID for latitude */ int lon_in_id; /* [id] Variable ID for longitude */ int lon_out_id; /* [id] Variable ID for longitude */ rcd=nco_inq_varid(hrz_id,lat_nm_in,&lat_in_id); rcd=nco_inq_varid(hrz_id,lon_nm_in,&lon_in_id); rcd=nco_inq_vartype(hrz_id,lat_in_id,&crd_typ_in); /* NB: ELM/CLM history files default to NC_FLOAT for most grid variables To convert to NC_DOUBLE on output, also convert _FillValue attribute type consistently */ crd_typ_out=crd_typ_in; /* Optional grid variables */ char *area_nm; char *sgs_frc_nm; char *lat_bnd_nm; char *lon_bnd_nm; char *sgs_msk_nm; int area_in_id=NC_MIN_INT; /* [id] Variable ID for area */ int area_out_id=NC_MIN_INT; /* [id] Variable ID for area */ int sgs_frc_in_id=NC_MIN_INT; /* [id] Variable ID for fraction */ int sgs_frc_out_id=NC_MIN_INT; /* [id] Variable ID for fraction */ int lat_bnd_in_id=NC_MIN_INT; /* [id] Variable ID for latitude bounds */ int lat_bnd_out_id=NC_MIN_INT; /* [id] Variable ID for latitude bounds */ int lon_bnd_in_id=NC_MIN_INT; /* [id] Variable ID for longitude bounds */ int lon_bnd_out_id=NC_MIN_INT; /* [id] Variable ID for longitude bounds */ int sgs_msk_in_id=NC_MIN_INT; /* [id] Variable ID for mask */ int sgs_msk_out_id=NC_MIN_INT; /* [id] Variable ID for mask */ nco_bool flg_area_out=False; /* [flg] Add area to output */ nco_bool flg_lat_bnd_out=False; /* [flg] Add latitude bounds to output */ nco_bool flg_lon_bnd_out=False; /* [flg] Add longitude bounds to output */ nco_bool flg_sgs_frc_out=False; /* [flg] Add fraction to output */ nco_bool flg_sgs_msk_out=False; /* [flg] Add mask to output */ area_nm=rgr->area_nm ? rgr->area_nm : strdup("area"); lat_bnd_nm=rgr->lat_bnd_nm ? rgr->lat_bnd_nm : strdup("lat_bnd"); lon_bnd_nm=rgr->lon_bnd_nm ? rgr->lon_bnd_nm : strdup("lon_bnd"); sgs_frc_nm=rgr->sgs_frc_nm ? rgr->sgs_frc_nm : strdup("landfrac"); sgs_msk_nm=rgr->sgs_msk_nm ? 
rgr->sgs_msk_nm : strdup("landmask"); if((rcd=nco_inq_varid_flg(hrz_id,area_nm,&area_in_id)) == NC_NOERR) flg_area_out=True; if((rcd=nco_inq_varid_flg(hrz_id,lat_bnd_nm,&lat_bnd_in_id)) == NC_NOERR) flg_lat_bnd_out=True; if((rcd=nco_inq_varid_flg(hrz_id,lon_bnd_nm,&lon_bnd_in_id)) == NC_NOERR) flg_lon_bnd_out=True; if((rcd=nco_inq_varid_flg(hrz_id,sgs_frc_nm,&sgs_frc_in_id)) == NC_NOERR) flg_sgs_frc_out=True; if((rcd=nco_inq_varid_flg(hrz_id,sgs_msk_nm,&sgs_msk_in_id)) == NC_NOERR) flg_sgs_msk_out=True; (void)fprintf(stdout,"%s: DEBUG quark2\n",nco_prg_nm_get()); if(flg_grd_1D){ rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col_out,&lat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,lat_in_id,lat_out_id,PCK_ATT_CPY); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_col_out,&lon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,lon_in_id,lon_out_id,PCK_ATT_CPY); var_crt_nbr++; if(flg_lat_bnd_out){ dmn_ids_out[0]=dmn_id_col_out; dmn_ids_out[1]=dmn_id_bnd_out; rcd+=nco_def_var(out_id,lat_bnd_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_bnd_out_id,shuffle,deflate,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,lat_bnd_in_id,lat_bnd_out_id,PCK_ATT_CPY); var_crt_nbr++; } /* !flg_lat_bnd_out */ if(flg_lon_bnd_out){ dmn_ids_out[0]=dmn_id_col_out; dmn_ids_out[1]=dmn_id_bnd_out; rcd+=nco_def_var(out_id,lon_bnd_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_out_id,shuffle,deflate,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,lon_bnd_in_id,lon_bnd_out_id,PCK_ATT_CPY); var_crt_nbr++; } /* !flg_lon_bnd_out */ if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm,crd_typ_out,dmn_nbr_1D,&dmn_id_col_out,&area_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,area_in_id,area_out_id,PCK_ATT_CPY); var_crt_nbr++; } /* !flg_area_out */ if(flg_sgs_frc_out){ rcd+=nco_def_var(out_id,sgs_frc_nm,crd_typ_out,dmn_nbr_1D,&dmn_id_col_out,&sgs_frc_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,sgs_frc_out_id,shuffle,deflate,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,sgs_frc_in_id,sgs_frc_out_id,PCK_ATT_CPY); var_crt_nbr++; } /* !flg_sgs_frc_out */ if(flg_sgs_msk_out){ rcd+=nco_def_var(out_id,sgs_msk_nm,(nc_type)NC_INT,dmn_nbr_1D,&dmn_id_col_out,&sgs_msk_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,sgs_msk_out_id,shuffle,deflate,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,sgs_msk_in_id,sgs_msk_out_id,PCK_ATT_CPY); var_crt_nbr++; } /* !flg_sgs_msk_out */ } /* !flg_grd_1D */ if(flg_grd_2D){ rcd+=nco_def_var(out_id,lat_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lat_out,&lat_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lat_out_id,shuffle,deflate,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,lat_in_id,lat_out_id,PCK_ATT_CPY); var_crt_nbr++; rcd+=nco_def_var(out_id,lon_nm_out,crd_typ_out,dmn_nbr_1D,&dmn_id_lon_out,&lon_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_out_id,shuffle,deflate,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,lon_in_id,lon_out_id,PCK_ATT_CPY); var_crt_nbr++; if(flg_lat_bnd_out){ dmn_ids_out[0]=dmn_id_lat_out; dmn_ids_out[1]=dmn_id_bnd_out; rcd+=nco_def_var(out_id,lat_bnd_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lat_bnd_out_id); if(dfl_lvl > 0) 
(void)nco_def_var_deflate(out_id,lat_bnd_out_id,shuffle,deflate,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,lat_bnd_in_id,lat_bnd_out_id,PCK_ATT_CPY); var_crt_nbr++; } /* !flg_lat_bnd_out */ if(flg_lon_bnd_out){ dmn_ids_out[0]=dmn_id_lon_out; dmn_ids_out[1]=dmn_id_bnd_out; rcd+=nco_def_var(out_id,lon_bnd_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&lon_bnd_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,lon_bnd_out_id,shuffle,deflate,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,lon_bnd_in_id,lon_bnd_out_id,PCK_ATT_CPY); var_crt_nbr++; } /* !flg_lon_bnd_out */ dmn_ids_out[0]=dmn_id_lat_out; dmn_ids_out[1]=dmn_id_lon_out; if(flg_area_out){ rcd+=nco_def_var(out_id,area_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&area_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,area_out_id,shuffle,deflate,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,area_in_id,area_out_id,PCK_ATT_CPY); var_crt_nbr++; } /* !flg_area_out */ if(flg_sgs_frc_out){ rcd+=nco_def_var(out_id,sgs_frc_nm,crd_typ_out,dmn_nbr_2D,dmn_ids_out,&sgs_frc_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,sgs_frc_out_id,shuffle,deflate,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,sgs_frc_in_id,sgs_frc_out_id,PCK_ATT_CPY); var_crt_nbr++; } /* !flg_sgs_frc_out */ if(flg_sgs_msk_out){ rcd+=nco_def_var(out_id,sgs_msk_nm,(nc_type)NC_INT,dmn_nbr_2D,dmn_ids_out,&sgs_msk_out_id); if(dfl_lvl > 0) (void)nco_def_var_deflate(out_id,sgs_msk_out_id,shuffle,deflate,dfl_lvl); (void)nco_att_cpy(hrz_id,out_id,sgs_msk_in_id,sgs_msk_out_id,PCK_ATT_CPY); var_crt_nbr++; } /* !flg_sgs_msk_out */ } /* !flg_grd_2D */ int flg_pck; /* [flg] Variable is packed on disk */ nco_bool has_mss_val; /* [flg] Has numeric missing value attribute */ nco_bool flg_add_spc_crd; /* [flg] Add spatial coordinates to S1D variable */ float mss_val_flt; double mss_val_dbl; nco_s1d_typ_enm nco_s1d_typ; /* [enm] Sparse-1D type of input variable */ aed_sct aed_mtd_fll_val; (void)fprintf(stdout,"%s: DEBUG quark3\n",nco_prg_nm_get()); /* Define unpacked S1D and copied variables in output file */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ var_nm=trv.nm; /* Preserve input type in output type */ var_typ=trv.var_typ; dmn_nbr_in=trv.nbr_dmn; dmn_nbr_out=trv.nbr_dmn; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid_flg(out_id,var_nm,&var_id_out); /* If variable has not been defined, define it */ if(rcd != NC_NOERR){ if(trv.flg_rgr){ /* Unpack */ (void)fprintf(stdout,"%s: DEBUG quark4\n",nco_prg_nm_get()); rcd=nco_inq_vardimid(in_id,var_id_in,dmn_ids_in); dmn_in_fst=0; flg_add_spc_crd=False; rcd=nco_inq_var_packing(in_id,var_id_in,&flg_pck); if(flg_pck) (void)fprintf(stdout,"%s: WARNING %s reports S1D variable \"%s\" is packed so results unpredictable. 
HINT: If regridded values seems weird, retry after unpacking input file with, e.g., \"ncpdq -U in.nc out.nc\"\n",nco_prg_nm_get(),fnc_nm,var_nm); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm); if(clm_nm_in && !strcmp(dmn_nm,clm_nm_in)){ if(mec_nbr_out > 0L){ /* Change input column dimension to MEC if present */ dmn_ids_out[dmn_idx]=dmn_id_mec_out; dmn_cnt_out[dmn_idx]=mec_nbr_out; dmn_in_fst++; dmn_nbr_out++; } /* !mec_nbr_out */ flg_add_spc_crd=True; }else if(!strcmp(dmn_nm,grd_nm_in)){ /* Gridcell dimension disappears to become spatial dimension in output */ flg_add_spc_crd=True; }else if(!strcmp(dmn_nm,lnd_nm_in)){ /* Change landunit dimension */ dmn_ids_out[dmn_idx]=dmn_id_lnd_out; dmn_cnt_out[dmn_idx]=lnd_nbr_out; flg_add_spc_crd=True; }else if(!strcmp(dmn_nm,pft_nm_in)){ if(pft_nbr_out > 0L){ /* Change input PFT dimension to PFT if present */ dmn_ids_out[dmn_idx]=dmn_id_pft_out; dmn_cnt_out[dmn_idx]=pft_nbr_out; dmn_in_fst++; dmn_nbr_out++; } /* !pft_nbr_out */ flg_add_spc_crd=True; }else{ /* Dimensions [clm/lnd/pft]_nm_in were pre-defined above as [clm/lnd/pft]_nm_out, replicate all other dimensions */ rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_ids_out+dmn_idx); } /* !clm */ if(rcd != NC_NOERR){ /* Current input dimension is not yet in output file */ (void)fprintf(stdout,"%s: DEBUG var_nm = %s, dmn_nm = %s\n",nco_prg_nm_get(),var_nm,dmn_nm); rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_out+dmn_idx); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_ids_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt_out[dmn_idx]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx); } /* !rcd */ if(flg_add_spc_crd){ /* Follow by spatial dimension(s) */ if(flg_grd_1D){ dmn_ids_out[dmn_idx+dmn_in_fst]=dmn_id_col_out; dmn_cnt_out[dmn_idx+dmn_in_fst]=col_nbr; } /* !flg_grd_1D */ if(flg_grd_2D){ dmn_ids_out[dmn_idx+dmn_in_fst]=dmn_id_lat_out; dmn_cnt_out[dmn_idx+dmn_in_fst]=lat_nbr; dmn_in_fst++; dmn_nbr_out++; dmn_ids_out[dmn_idx+dmn_in_fst]=dmn_id_lon_out; dmn_cnt_out[dmn_idx+dmn_in_fst]=lon_nbr; } /* !flg_grd_2D */ } /* !flg_add_spc_crd */ } /* !dmn_idx */ }else{ /* !flg_rgr */ /* Replicate non-S1D variables */ (void)fprintf(stdout,"%s: DEBUG quark5\n",nco_prg_nm_get()); rcd=nco_inq_vardimid(in_id,var_id_in,dmn_ids_in); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimname(in_id,dmn_ids_in[dmn_idx],dmn_nm); rcd=nco_inq_dimid_flg(out_id,dmn_nm,dmn_ids_out+dmn_idx); if(rcd != NC_NOERR){ rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_out+dmn_idx); /* Check-for and, if found, retain record dimension property */ for(int dmn_rec_idx=0;dmn_rec_idx < dmn_nbr_rec;dmn_rec_idx++) if(dmn_ids_in[dmn_idx] == dmn_ids_rec[dmn_rec_idx]) dmn_cnt_out[dmn_idx]=NC_UNLIMITED; rcd=nco_def_dim(out_id,dmn_nm,dmn_cnt_out[dmn_idx],dmn_ids_out+dmn_idx); } /* !rcd */ } /* !dmn_idx */ } /* !flg_rgr */ (void)fprintf(stdout,"%s: DEBUG quark6 defining %s...\n",nco_prg_nm_get(),var_nm); rcd=nco_def_var(out_id,var_nm,var_typ,dmn_nbr_out,dmn_ids_out,&var_id_out); (void)fprintf(stdout,"%s: DEBUG quark7 defined %s\n",nco_prg_nm_get(),var_nm); /* Duplicate netCDF4 settings when possible */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC){ /* Deflation */ if(dmn_nbr_out > 0){ int dfl_lvl_in; /* [enm] Deflate level [0..9] */ rcd=nco_inq_var_deflate(in_id,var_id_in,&shuffle,&deflate,&dfl_lvl_in); /* Copy original deflation 
settings */ if(deflate || shuffle) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl_in); /* Overwrite HDF Lempel-Ziv compression level, if requested */ if(dfl_lvl == 0) deflate=(int)False; else deflate=(int)True; /* Turn-off shuffle when uncompressing otherwise chunking requests may fail */ if(dfl_lvl == 0) shuffle=NC_NOSHUFFLE; /* Shuffle never, to my knowledge, increases filesize, so shuffle by default when manually deflating */ if(dfl_lvl >= 0) shuffle=NC_SHUFFLE; if(dfl_lvl >= 0) (void)nco_def_var_deflate(out_id,var_id_out,shuffle,deflate,dfl_lvl); } /* !dmn_nbr_out */ } /* !NC_FORMAT_NETCDF4 */ (void)nco_att_cpy(in_id,out_id,var_id_in,var_id_out,PCK_ATT_CPY); /* Variables with subterranean levels and missing-value extrapolation must have _FillValue attribute */ (void)fprintf(stdout,"%s: DEBUG quark8\n",nco_prg_nm_get()); nco_bool flg_add_msv_att; /* [flg] Extrapolation requires _FillValue */ flg_add_msv_att=False; if(flg_add_msv_att && trv.flg_rgr){ has_mss_val=nco_mss_val_get_dbl(in_id,var_id_in,&mss_val_dbl); if(!has_mss_val){ nco_bool flg_att_chg; /* [flg] _FillValue attribute was written */ aed_mtd_fll_val.var_nm=var_nm; aed_mtd_fll_val.id=var_id_out; aed_mtd_fll_val.type=var_typ; if(var_typ == NC_FLOAT) aed_mtd_fll_val.val.fp=&mss_val_flt; else if(var_typ == NC_DOUBLE) aed_mtd_fll_val.val.dp=&mss_val_dbl; flg_att_chg=nco_aed_prc(out_id,var_id_out,aed_mtd_fll_val); if(!flg_att_chg && nco_dbg_lvl_get() >= nco_dbg_std) (void)fprintf(stdout,"%s: WARNING %s reports unsuccessful attempt to create _FillValue attribute for variable %s\n",nco_prg_nm_get(),fnc_nm,var_nm); } /* !has_mss_val */ } /* !flg_add_msv_att */ } /* !rcd */ } /* !var */ } /* end idx_tbl */ (void)fprintf(stdout,"%s: DEBUG quark9\n",nco_prg_nm_get()); /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Begin data mode */ (void)nco_enddef(out_id); /* Copy coordinate system before closing template file NB: nco_cpy_var_val() cannot be used here when coordinates are in fl_tpl not fl_in */ (void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,lat_nm_in,(lmt_sct *)NULL,(int)0); (void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,lon_nm_in,(lmt_sct *)NULL,(int)0); if(flg_lat_bnd_out) (void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,lat_bnd_nm,(lmt_sct *)NULL,(int)0); if(flg_lon_bnd_out) (void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,lon_bnd_nm,(lmt_sct *)NULL,(int)0); if(flg_sgs_frc_out) (void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,sgs_frc_nm,(lmt_sct *)NULL,(int)0); if(flg_sgs_msk_out) (void)nco_cpy_var_val_lmt(hrz_id,out_id,(FILE *)NULL,sgs_msk_nm,(lmt_sct *)NULL,(int)0); if(flg_grd_tpl){ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ /* No further access to template file, close it */ nco_close(tpl_id); /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_tpl); } /* !flg_grd_tpl */ /* Free pre-allocated array space */ if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in); if(dmn_ids_out) dmn_ids_out=(int *)nco_free(dmn_ids_out); if(dmn_ids_rec) dmn_ids_rec=(int *)nco_free(dmn_ids_rec); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); /* Unpack and copy data from input file */ //int dmn_idx_col=int_CEWI; /* [idx] Index of column dimension */ //int dmn_idx_lat=int_CEWI; /* [idx] Index of latitude dimension */ //int dmn_idx_lon=int_CEWI; /* [idx] Index of longitude dimension */ int 
thr_idx; /* [idx] Thread index */ //int var_id; /* [id] Current variable ID */ size_t var_sz_in; /* [nbr] Number of elements in variable (will be self-multiplied) */ size_t var_sz_out; /* [nbr] Number of elements in variable (will be self-multiplied) */ ptr_unn var_val_in; ptr_unn var_val_out; /* Using naked stdin/stdout/stderr in parallel region generates warning Copy appropriate filehandle to variable scoped as shared in parallel clause */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ #ifdef __GNUG__ # pragma omp parallel for firstprivate(var_val_in,var_val_out) private(dmn_cnt_in,dmn_cnt_out,dmn_ids_in,dmn_ids_out,dmn_idx,dmn_nbr_in,dmn_nbr_out,dmn_nbr_max,dmn_nm,dmn_srt,has_clm,has_grd,has_lnd,has_pft,has_mss_val,idx_out,idx_tbl,in_id,mss_val_dbl,rcd,thr_idx,trv,var_id_in,var_id_out,var_nm,var_sz_in,var_sz_out,var_typ) shared(dmn_id_clm_in,dmn_id_clm_out,dmn_id_col_in,dmn_id_col_out,dmn_id_lat_in,dmn_id_lat_out,dmn_id_lnd_in,dmn_id_lnd_out,dmn_id_lon_in,dmn_id_lon_out,dmn_id_pft_in,dmn_id_pft_out,flg_s1d_clm,flg_s1d_pft,clm_nbr_in,clm_nbr_out,col_nbr,lat_nbr,lnd_nbr_in,lnd_nbr_out,lon_nbr,pft_nbr_in,pft_nbr_out,out_id,pfts1d_ixy,pfts1d_jxy) #endif /* !__GNUG__ */ for(idx_tbl=0;idx_tbl<trv_nbr;idx_tbl++){ trv=trv_tbl->lst[idx_tbl]; thr_idx=omp_get_thread_num(); in_id=trv_tbl->in_id_arr[thr_idx]; #ifdef _OPENMP if(nco_dbg_lvl_get() >= nco_dbg_grp && !thr_idx && !idx_tbl) (void)fprintf(fp_stdout,"%s: INFO %s reports regrid loop uses %d thread%s\n",nco_prg_nm_get(),fnc_nm,omp_get_num_threads(),(omp_get_num_threads() > 1) ? "s" : ""); if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s: INFO thread = %d, idx_tbl = %d, nm = %s\n",nco_prg_nm_get(),thr_idx,idx_tbl,trv.nm); #endif /* !_OPENMP */ if(trv.nco_typ == nco_obj_typ_var && trv.flg_xtr){ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(fp_stdout,"%s%s ",trv.flg_rgr ? "#" : "~",trv.nm); if(trv.flg_rgr){ /* Unpack variable */ var_nm=trv.nm; var_typ=trv.var_typ; /* NB: Output type in file is same as input type */ var_sz_in=1L; var_sz_out=1L; rcd=nco_inq_varid(in_id,var_nm,&var_id_in); rcd=nco_inq_varid(out_id,var_nm,&var_id_out); rcd=nco_inq_varndims(in_id,var_id_in,&dmn_nbr_in); rcd=nco_inq_varndims(out_id,var_id_out,&dmn_nbr_out); dmn_nbr_max= dmn_nbr_in > dmn_nbr_out ? 
dmn_nbr_in : dmn_nbr_out; dmn_ids_in=(int *)nco_malloc(dmn_nbr_in*sizeof(int)); dmn_ids_out=(int *)nco_malloc(dmn_nbr_out*sizeof(int)); dmn_srt=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); /* max() for both input and output grids */ dmn_cnt_in=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); dmn_cnt_out=(long *)nco_malloc(dmn_nbr_max*sizeof(long)); rcd=nco_inq_vardimid(in_id,var_id_in,dmn_ids_in); rcd=nco_inq_vardimid(out_id,var_id_out,dmn_ids_out); for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ rcd=nco_inq_dimlen(in_id,dmn_ids_in[dmn_idx],dmn_cnt_in+dmn_idx); var_sz_in*=dmn_cnt_in[dmn_idx]; dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ for(dmn_idx=0;dmn_idx<dmn_nbr_out;dmn_idx++){ rcd=nco_inq_dimlen(out_id,dmn_ids_out[dmn_idx],dmn_cnt_out+dmn_idx); if(dmn_cnt_out[dmn_idx] == 0L){ /* No records have been written, so overwrite zero output record size with input record size */ char dmn_rec_nm[NC_MAX_NAME]; /* [sng] Record dimension name */ int dmn_rec_id_in; rcd=nco_inq_dimname(out_id,dmn_ids_out[dmn_idx],dmn_rec_nm); rcd=nco_inq_dimid(in_id,dmn_rec_nm,&dmn_rec_id_in); rcd=nco_inq_dimlen(in_id,dmn_rec_id_in,dmn_cnt_out+dmn_idx); } /* !dmn_cnt_out */ var_sz_out*=dmn_cnt_out[dmn_idx]; dmn_srt[dmn_idx]=0L; } /* !dmn_idx */ var_val_in.vp=(void *)nco_malloc_dbg(var_sz_in*nco_typ_lng(var_typ),fnc_nm,"Unable to malloc() input value buffer"); var_val_out.vp=(void *)nco_malloc_dbg(var_sz_out*nco_typ_lng(var_typ),fnc_nm,"Unable to malloc() output value buffer"); /* Initialize output */ (void)memset(var_val_out.vp,0,var_sz_out*nco_typ_lng(var_typ)); /* Obtain input variable */ rcd=nco_get_vara(in_id,var_id_in,dmn_srt,dmn_cnt_in,var_val_in.vp,var_typ); has_clm=has_grd=has_lnd=has_pft=False; nco_s1d_typ=nco_s1d_nil; for(dmn_idx=0;dmn_idx<dmn_nbr_in;dmn_idx++){ dmn_nm_cp=trv.var_dmn[dmn_idx].dmn_nm; if(!has_clm && clm_nm_in) has_clm=!strcmp(dmn_nm_cp,clm_nm_in); if(!has_grd && grd_nm_in) has_grd=!strcmp(dmn_nm_cp,grd_nm_in); if(!has_lnd && lnd_nm_in) has_lnd=!strcmp(dmn_nm_cp,lnd_nm_in); if(!has_pft && pft_nm_in) has_pft=!strcmp(dmn_nm_cp,pft_nm_in); } /* !dmn_idx */ if(has_clm) nco_s1d_typ=nco_s1d_clm; else if(has_grd) nco_s1d_typ=nco_s1d_grd; else if(has_lnd) nco_s1d_typ=nco_s1d_lnd; else if(has_pft) nco_s1d_typ=nco_s1d_pft; else{ (void)fprintf(stderr,"%s: ERROR %s reports variable %s does not appear to be sparse\n",nco_prg_nm_get(),fnc_nm,var_nm); nco_exit(EXIT_FAILURE); } /* !strstr() */ if(nco_dbg_lvl_get() >= nco_dbg_std){ (void)fprintf(stderr,"%s: INFO %s reports variable %s is sparse type %s\n",nco_prg_nm_get(),fnc_nm,var_nm,nco_s1d_sng(nco_s1d_typ)); } /* !dbg */ /* The Hard Work */ if(nco_s1d_typ == nco_s1d_pft){ /* Turn GPP(time,pft) into GPP(time,pft,lndgrid) */ for(pft_idx=0;pft_idx<pft_nbr_in;pft_idx++){ pft_typ=pfts1d_ityp_veg[pft_idx]; /* [1 <= pft_typ <= pft_nbr_out] */ /* Skip bare ground, output array contains only vegetated types */ if(!pft_typ) continue; /* grd_idx is the index relative to the origin of the horizontal grid for a given level [0 <= grd_idx_out <= col_nbr_out-1L], [1 <= pfts1d_ixy <= col_nbr_out] */ grd_idx_out= flg_grd_1D ? 
pfts1d_ixy[pft_idx]-1L : (pfts1d_ixy[pft_idx]-1L)*lat_nbr+(pfts1d_jxy[pft_idx]-1L); idx_out=(pft_typ-1)*grd_sz_out+grd_idx_out; /* memcpy() would allow next statement to work for generic types However, memcpy() is a system call and could be expensive in an innermost loop */ switch(var_typ){ case NC_FLOAT: var_val_out.fp[idx_out]=var_val_in.fp[pft_idx]; break; case NC_DOUBLE: var_val_out.dp[idx_out]=var_val_in.dp[pft_idx]; break; case NC_INT: var_val_out.ip[idx_out]=var_val_in.ip[pft_idx]; break; default: (void)fprintf(fp_stdout,"%s: ERROR %s reports unsupported type\n",nco_prg_nm_get(),fnc_nm); nco_dfl_case_nc_type_err(); break; } /* !var_typ */ } /* !idx */ } /* !nco_s1d_typ */ #pragma omp critical { /* begin OpenMP critical */ rcd=nco_put_vara(out_id,var_id_out,dmn_srt,dmn_cnt_out,var_val_out.vp,var_typ); } /* end OpenMP critical */ if(dmn_ids_in) dmn_ids_in=(int *)nco_free(dmn_ids_in); if(dmn_ids_out) dmn_ids_out=(int *)nco_free(dmn_ids_out); if(dmn_srt) dmn_srt=(long *)nco_free(dmn_srt); if(dmn_cnt_in) dmn_cnt_in=(long *)nco_free(dmn_cnt_in); if(dmn_cnt_out) dmn_cnt_out=(long *)nco_free(dmn_cnt_out); if(var_val_in.vp) var_val_in.vp=(void *)nco_free(var_val_in.vp); if(var_val_out.vp) var_val_out.vp=(void *)nco_free(var_val_out.vp); }else{ /* !trv.flg_rgr */ /* Use standard NCO copy routine for variables that are not regridded 20190511: Copy them only once */ #pragma omp critical { /* begin OpenMP critical */ (void)nco_cpy_var_val(in_id,out_id,(FILE *)NULL,(md5_sct *)NULL,trv.nm,trv_tbl); } /* end OpenMP critical */ } /* !flg_rgr */ } /* !xtr */ } /* end (OpenMP parallel for) loop over idx_tbl */ if(nco_dbg_lvl_get() >= nco_dbg_var) (void)fprintf(stdout,"\n"); if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(stdout,"%s: INFO %s completion report: Variables interpolated = %d, copied unmodified = %d, omitted = %d, created = %d\n",nco_prg_nm_get(),fnc_nm,var_rgr_nbr,var_cpy_nbr,var_xcl_nbr,var_crt_nbr); /* Free output data memory */ if(cols1d_ityp) cols1d_ityp=(int *)nco_free(cols1d_ityp); if(cols1d_ityplun) cols1d_ityplun=(int *)nco_free(cols1d_ityplun); if(pfts1d_ityp_veg) pfts1d_ityp_veg=(int *)nco_free(pfts1d_ityp_veg); if(pfts1d_ityplun) pfts1d_ityplun=(int *)nco_free(pfts1d_ityplun); if(pfts1d_ixy) pfts1d_ixy=(int *)nco_free(pfts1d_ixy); if(pfts1d_jxy) pfts1d_jxy=(int *)nco_free(pfts1d_jxy); //if(pfts1d_wtgcell) pfts1d_wtgcell=(double *)nco_free(pfts1d_wtgcell); if(clm_nm_in) clm_nm_in=(char *)nco_free(clm_nm_in); if(grd_nm_in) grd_nm_in=(char *)nco_free(grd_nm_in); if(lnd_nm_in) lnd_nm_in=(char *)nco_free(lnd_nm_in); if(pft_nm_in) pft_nm_in=(char *)nco_free(pft_nm_in); if(clm_nm_out) clm_nm_out=(char *)nco_free(clm_nm_out); if(grd_nm_out) grd_nm_out=(char *)nco_free(grd_nm_out); if(lnd_nm_out) lnd_nm_out=(char *)nco_free(lnd_nm_out); if(pft_nm_out) pft_nm_out=(char *)nco_free(pft_nm_out); return rcd; } /* !nco_s1d_unpack() */
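A minimal standalone sketch (not part of NCO) of the sparse-to-dense scatter performed in the PFT branch above, shown for the flg_grd_1D case with hypothetical sample values; the array names mirror the source, but the sizes and contents here are made up for illustration only.

/* Illustrative sketch: unpack a sparse PFT vector into a dense [pft,gridcell] buffer.
   pfts1d_ixy is 1-based, pft_typ == 0 marks bare ground and is skipped. */
#include <stdio.h>

int main(void)
{
  const long grd_sz_out=3L;                 /* gridcells in dense output (hypothetical) */
  const int pft_nbr_out=2;                  /* vegetated PFT types in dense output */
  const int pft_nbr_in=4;                   /* length of sparse PFT vector */
  const int pfts1d_ityp_veg[4]={0,1,2,1};   /* 0 = bare ground, skipped */
  const long pfts1d_ixy[4]={1L,1L,2L,3L};   /* 1-based gridcell index */
  const double var_val_in[4]={0.0,1.5,2.5,3.5};
  double var_val_out[2*3]={0.0};            /* [pft_nbr_out*grd_sz_out], zero-initialized like memset() above */

  for(int pft_idx=0;pft_idx<pft_nbr_in;pft_idx++){
    const int pft_typ=pfts1d_ityp_veg[pft_idx];
    if(!pft_typ) continue;                               /* skip bare ground */
    const long grd_idx_out=pfts1d_ixy[pft_idx]-1L;       /* flg_grd_1D case; 2D grids also fold in pfts1d_jxy */
    const long idx_out=(pft_typ-1)*grd_sz_out+grd_idx_out;
    var_val_out[idx_out]=var_val_in[pft_idx];
  }
  for(long idx=0;idx<pft_nbr_out*grd_sz_out;idx++) printf("%g ",var_val_out[idx]);
  printf("\n");                                          /* prints: 1.5 0 3.5 0 2.5 0 */
  return 0;
}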
bench.c
#include "omp.h" #include "pmsis.h" #define LOOP_ITER (2048) #define NB_ITER (256) #define NB_BARRIER_ITER (256) #define NB_ITER_SINGLE (128) #define CORE_ID pi_core_id() #define PRINTF(...) //#define PRINTF(...) printf(__VA_ARGS__) static void start_timer() { pi_perf_cl_reset(); pi_perf_conf(1<<PI_PERF_CYCLES); pi_perf_cl_start(); } static void reset_timer() { pi_perf_cl_reset(); } static unsigned int get_time() { return pi_perf_cl_read(PI_PERF_CYCLES); } static inline unsigned int startTimer() { PRINTF("Starting timer\n"); reset_timer(); start_timer(); return 0; } static inline unsigned int getTimer(unsigned int start) { PRINTF("Ending timer\n"); return get_time(); } void test_barrier(unsigned int nthreads) { #pragma omp parallel num_threads(nthreads) shared(nthreads) { unsigned int start; int i; float operation_cost = 0; if (omp_get_thread_num() == 0) { start = startTimer(); } for (i = 0; i < NB_BARRIER_ITER; i++) { #pragma omp barrier } if (omp_get_thread_num() == 0) { unsigned int end = getTimer(start); operation_cost = (float) end / NB_BARRIER_ITER; printf("BARRIER %d threads: %f cycles\n", nthreads, operation_cost); } } } void test_critical(unsigned int nthreads) { #pragma omp parallel num_threads(nthreads) { int i; unsigned int start = startTimer(); float operation_cost = 0; for (i = 0; i < NB_ITER; i++) { #pragma omp critical { volatile int a = 0; } } #pragma omp barrier operation_cost = (float) getTimer(start) / NB_ITER; if (CORE_ID == 0) { printf("CRITICAL %d threads: %.3f cycles\n", nthreads, operation_cost); } } } void test_parallel_loop_static(unsigned int nthreads) { int i; int j; unsigned int start = startTimer(); float iteration_cost = 0; for (i = 0; i < NB_ITER; i++) { #pragma omp parallel for num_threads(nthreads) for (j = 0; j < LOOP_ITER; j++) { volatile int a = j; } } iteration_cost = ((float) getTimer(start)/(NB_ITER * LOOP_ITER)); printf("PARALLEL FOR %d threads STATIC %d iter: %.3f cycle(s) per iteration\n", nthreads, LOOP_ITER, iteration_cost); } void test_entry() { for (int i = 1; i <= pi_cl_cluster_nb_cores(); i++) { test_barrier(i); } printf("\n"); for (int i = 1; i <= pi_cl_cluster_nb_cores(); i++) { test_critical(i); } printf("\n"); for (int i = 1; i <= pi_cl_cluster_nb_cores(); i++) { test_parallel_loop_static (i); } } void launch_test(void) { printf("Entering main controller\n"); uint32_t errors = 0; uint32_t core_id = pi_core_id(), cluster_id = pi_cluster_id(); struct pi_device cluster_dev = {0}; struct pi_cluster_conf cl_conf = {0}; /* Init cluster configuration structure. */ pi_cluster_conf_init(&cl_conf); cl_conf.id = 0; /* Set cluster ID. */ /* Configure & open cluster. */ pi_open_from_conf(&cluster_dev, &cl_conf); if (pi_cluster_open(&cluster_dev)) { printf("Cluster open failed !\n"); pmsis_exit(-1); } /* Prepare cluster task and send it to cluster. */ struct pi_cluster_task cl_task = {0}; cl_task.entry = test_entry; cl_task.arg = NULL; pi_cluster_send_task_to_cl(&cluster_dev, &cl_task); pi_cluster_close(&cluster_dev); printf("Test success !\n"); pmsis_exit(errors); } /* Program Entry. */ int main(void) { printf("\n\n\t *** OpenMP Benchmark ***\n\n"); return pmsis_kickoff((void *) launch_test); }
ConverterOSG.h
/* -*-c++-*- IfcQuery www.ifcquery.com * MIT License Copyright (c) 2017 Fabian Gerold Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #pragma once #include <osg/CullFace> #include <osg/Geode> #include <osg/Hint> #include <osg/LineWidth> #include <osg/Material> #include <osg/Point> #include <osgUtil/Tessellator> #include <ifcpp/model/BasicTypes.h> #include <ifcpp/model/StatusCallback.h> #include <ifcpp/IFC4/include/IfcCurtainWall.h> #include <ifcpp/IFC4/include/IfcFeatureElementSubtraction.h> #include <ifcpp/IFC4/include/IfcGloballyUniqueId.h> #include <ifcpp/IFC4/include/IfcProject.h> #include <ifcpp/IFC4/include/IfcPropertySetDefinitionSet.h> #include <ifcpp/IFC4/include/IfcRelAggregates.h> #include <ifcpp/IFC4/include/IfcSpace.h> #include <ifcpp/IFC4/include/IfcWindow.h> #include <ifcpp/geometry/GeometrySettings.h> #include <ifcpp/geometry/SceneGraphUtils.h> #include <ifcpp/geometry/AppearanceData.h> #include "GeometryInputData.h" #include "IncludeCarveHeaders.h" #include "CSG_Adapter.h" class ConverterOSG : public StatusCallback { protected: shared_ptr<GeometrySettings> m_geom_settings; std::map<std::string, osg::ref_ptr<osg::Switch> > m_map_entity_guid_to_switch; std::map<int, osg::ref_ptr<osg::Switch> > m_map_representation_id_to_switch; double m_recent_progress; osg::ref_ptr<osg::CullFace> m_cull_back_off; osg::ref_ptr<osg::StateSet> m_glass_stateset; //\brief StateSet caching and re-use std::vector<osg::ref_ptr<osg::StateSet> > m_vec_existing_statesets; bool m_enable_stateset_caching = false; #ifdef ENABLE_OPENMP Mutex m_writelock_appearance_cache; #endif public: ConverterOSG( shared_ptr<GeometrySettings>& geom_settings ) : m_geom_settings(geom_settings) { m_cull_back_off = new osg::CullFace( osg::CullFace::BACK ); m_glass_stateset = new osg::StateSet(); m_glass_stateset->setMode( GL_BLEND, osg::StateAttribute::ON ); m_glass_stateset->setRenderingHint( osg::StateSet::TRANSPARENT_BIN ); } virtual ~ConverterOSG() {} // Map: IfcProduct ID -> scenegraph switch std::map<std::string, osg::ref_ptr<osg::Switch> >& getMapEntityGUIDToSwitch() { return m_map_entity_guid_to_switch; } // Map: Representation Identifier -> scenegraph switch std::map<int, osg::ref_ptr<osg::Switch> >& getMapRepresentationToSwitch() { return m_map_representation_id_to_switch; } void clearInputCache() { m_map_entity_guid_to_switch.clear(); m_map_representation_id_to_switch.clear(); m_vec_existing_statesets.clear(); } static void drawBoundingBox( const carve::geom::aabb<3>& aabb, osg::Geometry* geom ) { osg::ref_ptr<osg::Vec3Array> vertices = 
dynamic_cast<osg::Vec3Array*>( geom->getVertexArray() ); if( !vertices ) { vertices = new osg::Vec3Array(); geom->setVertexArray( vertices ); } const carve::geom::vector<3>& aabb_pos = aabb.pos; const carve::geom::vector<3>& extent = aabb.extent; const double dex = extent.x; const double dey = extent.y; const double dez = extent.z; const int vert_id_offset = vertices->size(); vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y - dey, aabb_pos.z - dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y - dey, aabb_pos.z - dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y + dey, aabb_pos.z - dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y + dey, aabb_pos.z - dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y - dey, aabb_pos.z + dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y - dey, aabb_pos.z + dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x + dex, aabb_pos.y + dey, aabb_pos.z + dez ) ); vertices->push_back( osg::Vec3f( aabb_pos.x - dex, aabb_pos.y + dey, aabb_pos.z + dez ) ); osg::ref_ptr<osg::DrawElementsUInt> box_lines = new osg::DrawElementsUInt( GL_LINE_STRIP, 0 ); box_lines->push_back( vert_id_offset + 0 ); box_lines->push_back( vert_id_offset + 1 ); box_lines->push_back( vert_id_offset + 2 ); box_lines->push_back( vert_id_offset + 3 ); box_lines->push_back( vert_id_offset + 0 ); box_lines->push_back( vert_id_offset + 4 ); box_lines->push_back( vert_id_offset + 5 ); box_lines->push_back( vert_id_offset + 1 ); box_lines->push_back( vert_id_offset + 5 ); box_lines->push_back( vert_id_offset + 6 ); box_lines->push_back( vert_id_offset + 2 ); box_lines->push_back( vert_id_offset + 6 ); box_lines->push_back( vert_id_offset + 7 ); box_lines->push_back( vert_id_offset + 3 ); box_lines->push_back( vert_id_offset + 7 ); box_lines->push_back( vert_id_offset + 4 ); geom->addPrimitiveSet( box_lines ); osg::ref_ptr<osg::Material> mat = new osg::Material(); if( !mat ) { throw OutOfMemoryException(); } osg::Vec4f ambientColor( 1.f, 0.2f, 0.1f, 1.f ); mat->setAmbient( osg::Material::FRONT_AND_BACK, ambientColor ); mat->setDiffuse( osg::Material::FRONT_AND_BACK, ambientColor ); mat->setSpecular( osg::Material::FRONT_AND_BACK, ambientColor ); //mat->setShininess( osg::Material::FRONT_AND_BACK, shininess ); //mat->setColorMode( osg::Material::SPECULAR ); osg::StateSet* stateset = geom->getOrCreateStateSet(); if( !stateset ) { throw OutOfMemoryException(); } stateset->setAttribute( mat, osg::StateAttribute::ON ); stateset->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); } static void drawFace( const carve::mesh::Face<3>* face, osg::Geode* geode, bool add_color_array = false ) { #ifdef _DEBUG std::cout << "not triangulated" << std::endl; #endif std::vector<vec3> face_vertices; face_vertices.resize( face->nVertices() ); carve::mesh::Edge<3> *e = face->edge; const size_t num_vertices = face->nVertices(); for( size_t i = 0; i < num_vertices; ++i ) { face_vertices[i] = e->v1()->v; e = e->next; } if( num_vertices < 4 ) { std::cout << "drawFace is meant only for num vertices > 4" << std::endl; } vec3* vertex_vec; osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array( num_vertices ); if( !vertices ) { throw OutOfMemoryException(); } osg::ref_ptr<osg::DrawElementsUInt> triangles = new osg::DrawElementsUInt( osg::PrimitiveSet::POLYGON, num_vertices ); if( !triangles ) { throw OutOfMemoryException(); } for( size_t i = 0; i < num_vertices; ++i ) { vertex_vec = &face_vertices[num_vertices - i - 
1]; ( *vertices )[i].set( vertex_vec->x, vertex_vec->y, vertex_vec->z ); ( *triangles )[i] = i; } osg::Vec3f poly_normal = SceneGraphUtils::computePolygonNormal( vertices ); osg::ref_ptr<osg::Vec3Array> normals = new osg::Vec3Array(); normals->resize( num_vertices, poly_normal ); osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices ); geometry->setNormalArray( normals ); normals->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::POLYGON, 0, vertices->size() ) ); if( add_color_array ) { osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); colors->resize( vertices->size(), osg::Vec4f( 0.6f, 0.6f, 0.6f, 0.1f ) ); colors->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setColorArray( colors ); } if( num_vertices > 4 ) { // TODO: check if polygon is convex with Gift wrapping algorithm osg::ref_ptr<osgUtil::Tessellator> tesselator = new osgUtil::Tessellator(); tesselator->setTessellationType( osgUtil::Tessellator::TESS_TYPE_POLYGONS ); //tesselator->setWindingType( osgUtil::Tessellator::TESS_WINDING_ODD ); tesselator->retessellatePolygons( *geometry ); } geode->addDrawable( geometry ); #ifdef DEBUG_DRAW_NORMALS osg::ref_ptr<osg::Vec3Array> vertices_normals = new osg::Vec3Array(); for( size_t i = 0; i < num_vertices; ++i ) { vertex_vec = &face_vertices[num_vertices - i - 1]; vertices_normals->push_back( osg::Vec3f( vertex_vec->x, vertex_vec->y, vertex_vec->z ) ); vertices_normals->push_back( osg::Vec3f( vertex_vec->x, vertex_vec->y, vertex_vec->z ) + poly_normal ); } osg::ref_ptr<osg::Vec4Array> colors_normals = new osg::Vec4Array(); colors_normals->resize( num_vertices * 2, osg::Vec4f( 0.4f, 0.7f, 0.4f, 1.f ) ); osg::ref_ptr<osg::Geometry> geometry_normals = new osg::Geometry(); geometry_normals->setVertexArray( vertices_normals ); geometry_normals->setColorArray( colors_normals ); geometry_normals->setColorBinding( osg::Geometry::BIND_PER_VERTEX ); geometry_normals->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geometry_normals->setNormalBinding( osg::Geometry::BIND_OFF ); geometry_normals->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_normals->size() ) ); geode->addDrawable( geometry_normals ); #endif } //#define DEBUG_DRAW_NORMALS static void drawMeshSet( const shared_ptr<carve::mesh::MeshSet<3> >& meshset, osg::Geode* geode, double crease_angle = M_PI*0.05, bool add_color_array = false ) { if( !meshset ) { return; } osg::ref_ptr<osg::Vec3Array> vertices_tri = new osg::Vec3Array(); if( !vertices_tri ) { throw OutOfMemoryException(); } osg::ref_ptr<osg::Vec3Array> normals_tri = new osg::Vec3Array(); if( !normals_tri ) { throw OutOfMemoryException(); } osg::ref_ptr<osg::Vec3Array> vertices_quad; osg::ref_ptr<osg::Vec3Array> normals_quad; const size_t max_num_faces_per_vertex = 10000; std::map<carve::mesh::Face<3>*, double> map_face_area; std::map<carve::mesh::Face<3>*, double>::iterator it_face_area; if( crease_angle > 0 ) { for( size_t i_mesh = 0; i_mesh < meshset->meshes.size(); ++i_mesh ) { const carve::mesh::Mesh<3>* mesh = meshset->meshes[i_mesh]; const size_t num_faces = mesh->faces.size(); for( size_t i_face = 0; i_face != num_faces; ++i_face ) { carve::mesh::Face<3>* face = mesh->faces[i_face]; // compute area of projected face: std::vector<vec2> projected; face->getProjectedVertices( projected ); double face_area = carve::geom2d::signedArea( projected ); map_face_area[face] = abs( face_area ); } } } for( size_t i_mesh 
= 0; i_mesh < meshset->meshes.size(); ++i_mesh ) { const carve::mesh::Mesh<3>* mesh = meshset->meshes[i_mesh]; const size_t num_faces = mesh->faces.size(); for( size_t i_face = 0; i_face != num_faces; ++i_face ) { carve::mesh::Face<3>* face = mesh->faces[i_face]; const size_t n_vertices = face->nVertices(); if( n_vertices > 4 ) { drawFace( face, geode ); continue; } const vec3 face_normal = face->plane.N; if( crease_angle > 0 ) { carve::mesh::Edge<3>* e = face->edge; for( size_t jj = 0; jj < n_vertices; ++jj ) { carve::mesh::Vertex<3>* vertex = e->vert; vec3 intermediate_normal; // collect all faces at vertex // | ^ // | | // f1 e->rev | | e face // v | // <---e1------- <--------------- //-------------> ---------------> // | ^ // | | // v | carve::mesh::Edge<3>* e1 = e;// ->rev->next; carve::mesh::Face<3>* f1 = e1->face; #ifdef _DEBUG if( f1 != face ) { std::cout << "f1 != face" << std::endl; } #endif for( size_t i3 = 0; i3 < max_num_faces_per_vertex; ++i3 ) { if( !e1->rev ) { break; } if( !e1->rev->next ) { break; } vec3 f1_normal = f1->plane.N; const double cos_angle = dot( f1_normal, face_normal ); if( cos_angle > 0 ) { const double deviation = std::abs( cos_angle - 1.0 ); if( deviation < crease_angle ) { double weight = 0.0; it_face_area = map_face_area.find( f1 ); if( it_face_area != map_face_area.end() ) { weight = it_face_area->second; } intermediate_normal += weight*f1_normal; } } if( !e1->rev ) { // it's an open mesh break; } e1 = e1->rev->next; if( !e1 ) { break; } f1 = e1->face; #ifdef _DEBUG if( e1->vert != vertex ) { std::cout << "e1->vert != vertex" << std::endl; } #endif if( f1 == face ) { break; } } const double intermediate_normal_length = intermediate_normal.length(); if( intermediate_normal_length < 0.0000000001 ) { intermediate_normal = face_normal; } else { // normalize: intermediate_normal *= 1.0 / intermediate_normal_length; } const vec3& vertex_v = vertex->v; if( face->n_edges == 3 ) { vertices_tri->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) ); normals_tri->push_back( osg::Vec3( intermediate_normal.x, intermediate_normal.y, intermediate_normal.z ) ); } else if( face->n_edges == 4 ) { if( !vertices_quad ) vertices_quad = new osg::Vec3Array(); vertices_quad->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) ); if( !normals_quad ) normals_quad = new osg::Vec3Array(); normals_quad->push_back( osg::Vec3( intermediate_normal.x, intermediate_normal.y, intermediate_normal.z ) ); } e = e->next; } } else { carve::mesh::Edge<3>* e = face->edge; for( size_t jj = 0; jj < n_vertices; ++jj ) { carve::mesh::Vertex<3>* vertex = e->vert; const vec3& vertex_v = vertex->v; if( face->n_edges == 3 ) { vertices_tri->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) ); normals_tri->push_back( osg::Vec3( face_normal.x, face_normal.y, face_normal.z ) ); } else if( face->n_edges == 4 ) { if( !vertices_quad ) vertices_quad = new osg::Vec3Array(); vertices_quad->push_back( osg::Vec3( vertex_v.x, vertex_v.y, vertex_v.z ) ); if( !normals_quad ) normals_quad = new osg::Vec3Array(); normals_quad->push_back( osg::Vec3( face_normal.x, face_normal.y, face_normal.z ) ); } e = e->next; } } } } if( vertices_tri->size() > 0 ) { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); if( !geometry ) { throw OutOfMemoryException(); } geometry->setVertexArray( vertices_tri ); geometry->setNormalArray( normals_tri ); normals_tri->setBinding( osg::Array::BIND_PER_VERTEX ); if( add_color_array ) { osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); if( !colors ) { 
throw OutOfMemoryException(); } colors->resize( vertices_tri->size(), osg::Vec4f( 0.6f, 0.6f, 0.6f, 0.1f ) ); colors->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setColorArray( colors ); } geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::TRIANGLES, 0, vertices_tri->size() ) ); if( !geometry ) { throw OutOfMemoryException(); } geode->addDrawable( geometry ); #ifdef DEBUG_DRAW_NORMALS osg::ref_ptr<osg::Vec3Array> vertices_normals = new osg::Vec3Array(); for( size_t i = 0; i < vertices_tri->size(); ++i ) { osg::Vec3f& vertex_vec = vertices_tri->at( i );// [i]; osg::Vec3f& normal_vec = normals_tri->at( i ); vertices_normals->push_back( osg::Vec3f( vertex_vec.x(), vertex_vec.y(), vertex_vec.z() ) ); vertices_normals->push_back( osg::Vec3f( vertex_vec.x(), vertex_vec.y(), vertex_vec.z() ) + normal_vec ); } osg::ref_ptr<osg::Vec4Array> colors_normals = new osg::Vec4Array(); colors_normals->resize( vertices_normals->size(), osg::Vec4f( 0.4f, 0.7f, 0.4f, 1.f ) ); osg::ref_ptr<osg::Geometry> geometry_normals = new osg::Geometry(); geometry_normals->setVertexArray( vertices_normals ); geometry_normals->setColorArray( colors_normals ); geometry_normals->setColorBinding( osg::Geometry::BIND_PER_VERTEX ); geometry_normals->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geometry_normals->setNormalBinding( osg::Geometry::BIND_OFF ); geometry_normals->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices_normals->size() ) ); geode->addDrawable( geometry_normals ); #endif } if( vertices_quad ) { if( vertices_quad->size() > 0 ) { osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); if( !geometry ) { throw OutOfMemoryException(); } geometry->setVertexArray( vertices_quad ); if( normals_quad ) { normals_quad->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setNormalArray( normals_quad ); } if( add_color_array ) { osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array(); if( !colors ) { throw OutOfMemoryException(); } colors->resize( vertices_quad->size(), osg::Vec4f( 0.6f, 0.6f, 0.6f, 0.1f ) ); colors->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setColorArray( colors ); } geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::QUADS, 0, vertices_quad->size() ) ); if( !geometry ) { throw OutOfMemoryException(); } geode->addDrawable( geometry ); } } } static void drawPolyline( const carve::input::PolylineSetData* polyline_data, osg::Geode* geode, bool add_color_array = false ) { osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array(); if( !vertices ) { throw OutOfMemoryException(); } carve::line::PolylineSet* polyline_set = polyline_data->create( carve::input::opts() ); if( polyline_set->vertices.size() < 2 ) { #ifdef _DEBUG std::cout << __FUNC__ << ": polyline_set->vertices.size() < 2" << std::endl; #endif return; } for( auto it = polyline_set->lines.begin(); it != polyline_set->lines.end(); ++it ) { const carve::line::Polyline* pline = *it; size_t vertex_count = pline->vertexCount(); for( size_t vertex_i = 0; vertex_i < vertex_count; ++vertex_i ) { if( vertex_i >= polyline_set->vertices.size() ) { #ifdef _DEBUG std::cout << __FUNC__ << ": vertex_i >= polyline_set->vertices.size()" << std::endl; #endif continue; } const carve::line::Vertex* v = pline->vertex( vertex_i ); vertices->push_back( osg::Vec3d( v->v[0], v->v[1], v->v[2] ) ); } } osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); if( !geometry ) { throw OutOfMemoryException(); } geometry->setVertexArray( vertices ); 
geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINE_STRIP, 0, vertices->size() ) ); if( add_color_array ) { osg::Vec4f color( 0.6f, 0.6f, 0.6f, 0.1f ); osg::ref_ptr<osg::Vec4Array> colors = new osg::Vec4Array( vertices->size(), &color ); if( !colors ) { throw OutOfMemoryException(); } colors->setBinding( osg::Array::BIND_PER_VERTEX ); geometry->setColorArray( colors ); } geode->addDrawable( geometry ); } void computeCreaseEdgesFromMeshset( const shared_ptr<carve::mesh::MeshSet<3> >& meshset, std::vector<carve::mesh::Edge<3>* >& vec_edges_out, const double crease_angle ) { if( !meshset ) { return; } for( size_t i_mesh = 0; i_mesh < meshset->meshes.size(); ++i_mesh ) { const carve::mesh::Mesh<3>* mesh = meshset->meshes[i_mesh]; const std::vector<carve::mesh::Edge<3>* >& vec_closed_edges = mesh->closed_edges; for( size_t i_edge = 0; i_edge < vec_closed_edges.size(); ++i_edge ) { carve::mesh::Edge<3>* edge = vec_closed_edges[i_edge]; if( !edge ) { continue; } carve::mesh::Edge<3>* edge_reverse = edge->rev; if( !edge_reverse ) { continue; } carve::mesh::Face<3>* face = edge->face; carve::mesh::Face<3>* face_reverse = edge_reverse->face; const carve::geom::vector<3>& f1_normal = face->plane.N; const carve::geom::vector<3>& f2_normal = face_reverse->plane.N; const double cos_angle = dot( f1_normal, f2_normal ); if( cos_angle > 0 ) { const double deviation = std::abs( cos_angle - 1.0 ); if( deviation < crease_angle ) { continue; } } // TODO: if area of face and face_reverse is equal, skip the crease edge. It could be the inside or outside of a cylinder. Check also if > 2 faces in a row have same normal angle differences vec_edges_out.push_back( edge ); } } } void renderMeshsetCreaseEdges( const shared_ptr<carve::mesh::MeshSet<3> >& meshset, osg::Geode* target_geode, const double crease_angle ) { if( !meshset ) { return; } if( !target_geode ) { return; } std::vector<carve::mesh::Edge<3>* > vec_crease_edges; computeCreaseEdgesFromMeshset( meshset, vec_crease_edges, crease_angle ); if( vec_crease_edges.size() > 0 ) { osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array(); for( size_t i_edge = 0; i_edge < vec_crease_edges.size(); ++i_edge ) { const carve::mesh::Edge<3>* edge = vec_crease_edges[i_edge]; const carve::geom::vector<3>& vertex1 = edge->v1()->v; const carve::geom::vector<3>& vertex2 = edge->v2()->v; vertices->push_back( osg::Vec3d( vertex1.x, vertex1.y, vertex1.z ) ); vertices->push_back( osg::Vec3d( vertex2.x, vertex2.y, vertex2.z ) ); } osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setName("creaseEdges"); geometry->setVertexArray( vertices ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::LINES, 0, vertices->size() ) ); geometry->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geometry->getOrCreateStateSet()->setMode( GL_BLEND, osg::StateAttribute::ON ); geometry->getOrCreateStateSet()->setAttributeAndModes( new osg::LineWidth( 3.0f ), osg::StateAttribute::ON ); geometry->getOrCreateStateSet()->setMode( GL_LINE_SMOOTH, osg::StateAttribute::ON ); geometry->getOrCreateStateSet()->setAttributeAndModes( new osg::Hint( GL_LINE_SMOOTH_HINT, GL_NICEST ), osg::StateAttribute::ON ); geometry->getOrCreateStateSet()->setRenderBinDetails( 10, "RenderBin"); target_geode->addDrawable( geometry ); } } void applyAppearancesToGroup( const std::vector<shared_ptr<AppearanceData> >& vec_product_appearances, osg::Group* grp ) { for( size_t ii = 0; ii < vec_product_appearances.size(); ++ii ) { const 
shared_ptr<AppearanceData>& appearance = vec_product_appearances[ii]; if( !appearance ) { continue; } if( appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_SURFACE || appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_ANY ) { osg::ref_ptr<osg::StateSet> item_stateset; convertToOSGStateSet( appearance, item_stateset ); if( item_stateset ) { osg::StateSet* existing_item_stateset = grp->getStateSet(); if( existing_item_stateset ) { if( existing_item_stateset != item_stateset ) { existing_item_stateset->merge( *item_stateset ); } } else { grp->setStateSet( item_stateset ); } } } else if( appearance->m_apply_to_geometry_type == AppearanceData::GEOM_TYPE_CURVE ) { } } } osg::Matrixd convertMatrixToOSG( const carve::math::Matrix& mat_in ) { return osg::Matrixd( mat_in.m[0][0], mat_in.m[0][1], mat_in.m[0][2], mat_in.m[0][3], mat_in.m[1][0], mat_in.m[1][1], mat_in.m[1][2], mat_in.m[1][3], mat_in.m[2][0], mat_in.m[2][1], mat_in.m[2][2], mat_in.m[2][3], mat_in.m[3][0], mat_in.m[3][1], mat_in.m[3][2], mat_in.m[3][3] ); } //\brief method convertProductShapeToOSG: creates geometry objects from an IfcProduct object // caution: when using OpenMP, this method runs in parallel threads, so every write access to member variables needs a write lock void convertProductShapeToOSG( shared_ptr<ProductShapeData>& product_shape, std::map<int, osg::ref_ptr<osg::Switch> >& map_representation_switches ) { if( product_shape->m_ifc_object_definition.expired() ) { return; } shared_ptr<IfcObjectDefinition> ifc_object_def( product_shape->m_ifc_object_definition ); shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>( ifc_object_def ); if( !ifc_product ) { return; } std::string product_guid; if (ifc_product->m_GlobalId) { std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX; product_guid = converterX.to_bytes(ifc_product->m_GlobalId->m_value); } std::stringstream strs_product_switch_name; strs_product_switch_name << product_guid << ":" << ifc_product->className() << " group"; bool draw_bounding_box = false; // create OSG objects std::vector<shared_ptr<RepresentationData> >& vec_product_representations = product_shape->m_vec_representations; for( size_t ii_representation = 0; ii_representation < vec_product_representations.size(); ++ii_representation ) { const shared_ptr<RepresentationData>& product_representation_data = vec_product_representations[ii_representation]; if( product_representation_data->m_ifc_representation.expired() ) { continue; } shared_ptr<IfcRepresentation> ifc_representation( product_representation_data->m_ifc_representation ); const int representation_id = ifc_representation->m_entity_id; osg::ref_ptr<osg::Switch> representation_switch = new osg::Switch(); #ifdef _DEBUG std::stringstream strs_representation_name; strs_representation_name << strs_product_switch_name.str().c_str() << ", representation " << ii_representation; representation_switch->setName( strs_representation_name.str().c_str() ); #endif const std::vector<shared_ptr<ItemShapeData> >& product_items = product_representation_data->m_vec_item_data; for( size_t i_item = 0; i_item < product_items.size(); ++i_item ) { const shared_ptr<ItemShapeData>& item_shape = product_items[i_item]; osg::ref_ptr<osg::MatrixTransform> item_group = new osg::MatrixTransform(); if( !item_group ) { throw OutOfMemoryException( __FUNC__ ); } #ifdef _DEBUG std::stringstream strs_item_name; strs_item_name << strs_representation_name.str().c_str() << ", item " << i_item; item_group->setName( 
strs_item_name.str().c_str() ); #endif // create shape for open shells for( size_t ii = 0; ii < item_shape->m_meshsets_open.size(); ++ii ) { shared_ptr<carve::mesh::MeshSet<3> >& item_meshset = item_shape->m_meshsets_open[ii]; CSG_Adapter::retriangulateMeshSet( item_meshset ); osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } drawMeshSet( item_meshset, geode, m_geom_settings->getCoplanarFacesMaxDeltaAngle() ); if( m_geom_settings->getRenderCreaseEdges() ) { renderMeshsetCreaseEdges( item_meshset, geode, m_geom_settings->getCreaseEdgesMaxDeltaAngle() ); } // disable back face culling for open meshes geode->getOrCreateStateSet()->setAttributeAndModes( m_cull_back_off.get(), osg::StateAttribute::OFF ); item_group->addChild( geode ); if( draw_bounding_box ) { carve::geom::aabb<3> bbox = item_meshset->getAABB(); osg::ref_ptr<osg::Geometry> bbox_geom = new osg::Geometry(); drawBoundingBox( bbox, bbox_geom ); geode->addDrawable( bbox_geom ); } #ifdef _DEBUG std::stringstream strs_item_meshset_name; strs_item_meshset_name << strs_item_name.str().c_str() << ", open meshset " << ii; geode->setName( strs_item_meshset_name.str().c_str() ); #endif } // create shape for meshsets for( size_t ii = 0; ii < item_shape->m_meshsets.size(); ++ii ) { shared_ptr<carve::mesh::MeshSet<3> >& item_meshset = item_shape->m_meshsets[ii]; CSG_Adapter::retriangulateMeshSet( item_meshset ); osg::ref_ptr<osg::Geode> geode_meshset = new osg::Geode(); if( !geode_meshset ) { throw OutOfMemoryException( __FUNC__ ); } drawMeshSet( item_meshset, geode_meshset, m_geom_settings->getCoplanarFacesMaxDeltaAngle() ); item_group->addChild( geode_meshset ); if( m_geom_settings->getRenderCreaseEdges() ) { renderMeshsetCreaseEdges( item_meshset, geode_meshset, m_geom_settings->getCreaseEdgesMaxDeltaAngle() ); } if( draw_bounding_box ) { carve::geom::aabb<3> bbox = item_meshset->getAABB(); osg::ref_ptr<osg::Geometry> bbox_geom = new osg::Geometry(); drawBoundingBox( bbox, bbox_geom ); geode_meshset->addDrawable( bbox_geom ); } #ifdef _DEBUG std::stringstream strs_item_meshset_name; strs_item_meshset_name << strs_item_name.str().c_str() << ", meshset " << ii; geode_meshset->setName( strs_item_meshset_name.str().c_str() ); #endif } // create shape for points const std::vector<shared_ptr<carve::input::VertexData> >& vertex_points = item_shape->getVertexPoints(); for( size_t ii = 0; ii < vertex_points.size(); ++ii ) { const shared_ptr<carve::input::VertexData>& pointset_data = vertex_points[ii]; if( pointset_data ) { if( pointset_data->points.size() > 0 ) { osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } osg::ref_ptr<osg::Vec3Array> vertices = new osg::Vec3Array(); for( size_t i_pointset_point = 0; i_pointset_point < pointset_data->points.size(); ++i_pointset_point ) { vec3& carve_point = pointset_data->points[i_pointset_point]; vertices->push_back( osg::Vec3d( carve_point.x, carve_point.y, carve_point.z ) ); } osg::ref_ptr<osg::Geometry> geometry = new osg::Geometry(); geometry->setVertexArray( vertices ); geometry->addPrimitiveSet( new osg::DrawArrays( osg::PrimitiveSet::POINTS, 0, vertices->size() ) ); geode->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); geode->getOrCreateStateSet()->setAttribute( new osg::Point( 3.0f ), osg::StateAttribute::ON ); geode->addDrawable( geometry ); geode->setCullingActive( false ); item_group->addChild( geode ); #ifdef _DEBUG std::stringstream strs_item_meshset_name; 
strs_item_meshset_name << strs_item_name.str().c_str() << ", vertex_point " << ii; geode->setName( strs_item_meshset_name.str().c_str() ); #endif } } } // create shape for polylines for( size_t ii = 0; ii < item_shape->m_polylines.size(); ++ii ) { shared_ptr<carve::input::PolylineSetData>& polyline_data = item_shape->m_polylines[ii]; osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ) { throw OutOfMemoryException( __FUNC__ ); } geode->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); drawPolyline( polyline_data.get(), geode ); item_group->addChild( geode ); #ifdef _DEBUG std::stringstream strs_item_meshset_name; strs_item_meshset_name << strs_item_name.str().c_str() << ", polylines " << ii; geode->setName( strs_item_meshset_name.str().c_str() ); #endif } if( m_geom_settings->isShowTextLiterals() ) { for( size_t ii = 0; ii < item_shape->m_vec_text_literals.size(); ++ii ) { shared_ptr<TextItemData>& text_data = item_shape->m_vec_text_literals[ii]; if( !text_data ) { continue; } carve::math::Matrix& text_pos = text_data->m_text_position; // TODO: handle rotation std::string text_str; text_str.assign( text_data->m_text.begin(), text_data->m_text.end() ); osg::Vec3 pos2( text_pos._41, text_pos._42, text_pos._43 ); osg::ref_ptr<osgText::Text> txt = new osgText::Text(); if( !txt ) { throw OutOfMemoryException( __FUNC__ ); } txt->setFont( "fonts/arial.ttf" ); txt->setColor( osg::Vec4f( 0, 0, 0, 1 ) ); txt->setCharacterSize( 0.1f ); txt->setAutoRotateToScreen( true ); txt->setPosition( pos2 ); txt->setText( text_str.c_str() ); txt->getOrCreateStateSet()->setMode( GL_LIGHTING, osg::StateAttribute::OFF ); osg::ref_ptr<osg::Geode> geode = new osg::Geode(); if( !geode ){ throw OutOfMemoryException( __FUNC__ ); } geode->addDrawable( txt ); item_group->addChild( geode ); } } // apply statesets if there are any if( item_shape->m_vec_item_appearances.size() > 0 ) { applyAppearancesToGroup( item_shape->m_vec_item_appearances, item_group ); } // If anything has been created, add it to the representation group if( item_group->getNumChildren() > 0 ) { #ifdef _DEBUG if( item_group->getNumParents() > 0 ) { std::cout << __FUNC__ << ": item_group->getNumParents() > 0" << std::endl; } #endif representation_switch->addChild( item_group ); } } // apply statesets if there are any if( product_representation_data->m_vec_representation_appearances.size() > 0 ) { applyAppearancesToGroup( product_representation_data->m_vec_representation_appearances, representation_switch ); } // If anything has been created, add it to the product group if( representation_switch->getNumChildren() > 0 ) { #ifdef _DEBUG if( representation_switch->getNumParents() > 0 ) { std::cout << __FUNC__ << ": product_representation_switch->getNumParents() > 0" << std::endl; } #endif // enable transparency for certain objects if( dynamic_pointer_cast<IfcSpace>(ifc_product) ) { representation_switch->setStateSet( m_glass_stateset ); } else if( dynamic_pointer_cast<IfcCurtainWall>(ifc_product) || dynamic_pointer_cast<IfcWindow>(ifc_product) ) { representation_switch->setStateSet( m_glass_stateset ); SceneGraphUtils::setMaterialAlpha( representation_switch, 0.6f, true ); } // check if parent building element is window if( ifc_product->m_Decomposes_inverse.size() > 0 ) { for( size_t ii_decomposes = 0; ii_decomposes < ifc_product->m_Decomposes_inverse.size(); ++ii_decomposes ) { const weak_ptr<IfcRelAggregates>& decomposes_weak = ifc_product->m_Decomposes_inverse[ii_decomposes]; if( decomposes_weak.expired() ) { continue; } 
shared_ptr<IfcRelAggregates> decomposes_ptr(decomposes_weak); shared_ptr<IfcObjectDefinition>& relating_object = decomposes_ptr->m_RelatingObject; if( relating_object ) { if( dynamic_pointer_cast<IfcCurtainWall>(relating_object) || dynamic_pointer_cast<IfcWindow>(relating_object) ) { representation_switch->setStateSet(m_glass_stateset); SceneGraphUtils::setMaterialAlpha(representation_switch, 0.6f, true); } } } } map_representation_switches.insert( std::make_pair( representation_id, representation_switch ) ); } } // TODO: if no color or material is given, set color 231/219/169 for walls, 140/140/140 for slabs } /*\brief method convertToOSG: Creates geometry for OpenSceneGraph from given ProductShapeData. \param[out] parent_group Group to append the geometry. **/ void convertToOSG( const std::map<std::string, shared_ptr<ProductShapeData> >& map_shape_data, osg::ref_ptr<osg::Switch> parent_group ) { progressTextCallback( L"Converting geometry to OpenGL format ..." ); progressValueCallback( 0, "scenegraph" ); m_map_entity_guid_to_switch.clear(); m_map_representation_id_to_switch.clear(); m_vec_existing_statesets.clear(); shared_ptr<ProductShapeData> ifc_project_data; std::vector<shared_ptr<ProductShapeData> > vec_products; for( auto it = map_shape_data.begin(); it != map_shape_data.end(); ++it ) { shared_ptr<ProductShapeData> shape_data = it->second; if( shape_data ) { vec_products.push_back( shape_data ); } } // create geometry for for each IfcProduct independently, spatial structure will be resolved later std::map<std::string, osg::ref_ptr<osg::Switch> >* map_entity_guid = &m_map_entity_guid_to_switch; std::map<int, osg::ref_ptr<osg::Switch> >* map_representations = &m_map_representation_id_to_switch; const int num_products = (int)vec_products.size(); #ifdef ENABLE_OPENMP Mutex writelock_map; Mutex writelock_message_callback; Mutex writelock_ifc_project; #pragma omp parallel firstprivate(num_products) shared(map_entity_guid, map_representations) { // time for one product may vary significantly, so schedule not so many #pragma omp for schedule(dynamic,40) #endif for( int i = 0; i < num_products; ++i ) { shared_ptr<ProductShapeData>& shape_data = vec_products[i]; weak_ptr<IfcObjectDefinition>& ifc_object_def_weak = shape_data->m_ifc_object_definition; if( ifc_object_def_weak.expired() ) { continue; } shared_ptr<IfcObjectDefinition> ifc_object_def( ifc_object_def_weak ); std::stringstream thread_err; if( dynamic_pointer_cast<IfcFeatureElementSubtraction>(ifc_object_def) ) { // geometry will be created in method subtractOpenings continue; } else if( dynamic_pointer_cast<IfcProject>(ifc_object_def) ) { #ifdef ENABLE_OPENMP ScopedLock scoped_lock( writelock_ifc_project ); #endif ifc_project_data = shape_data; } shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def); if( !ifc_product ) { continue; } if( !ifc_product->m_Representation ) { continue; } const int product_id = ifc_product->m_entity_id; std::string product_guid; std::map<int, osg::ref_ptr<osg::Switch> > map_representation_switches; try { convertProductShapeToOSG( shape_data, map_representation_switches ); } catch( OutOfMemoryException& e ) { throw e; } catch( BuildingException& e ) { thread_err << e.what(); } catch( carve::exception& e ) { thread_err << e.str(); } catch( std::exception& e ) { thread_err << e.what(); } catch( ... 
) { thread_err << "undefined error, product id " << product_id; } if (ifc_object_def->m_GlobalId) { std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX; product_guid = converterX.to_bytes(ifc_object_def->m_GlobalId->m_value); } if( map_representation_switches.size() > 0 ) { osg::ref_ptr<osg::Switch> product_switch = new osg::Switch(); osg::ref_ptr<osg::MatrixTransform> product_transform = new osg::MatrixTransform(); product_transform->setMatrix( convertMatrixToOSG( shape_data->getTransform() ) ); product_switch->addChild( product_transform ); std::stringstream strs_product_switch_name; strs_product_switch_name << product_guid << ":" << ifc_product->className() << " group"; product_switch->setName( strs_product_switch_name.str().c_str() ); for( auto it_map = map_representation_switches.begin(); it_map != map_representation_switches.end(); ++it_map ) { osg::ref_ptr<osg::Switch>& repres_switch = it_map->second; product_transform->addChild( repres_switch ); } // apply statesets if there are any const std::vector<shared_ptr<AppearanceData> >& vec_product_appearances = shape_data->getAppearances(); if( vec_product_appearances.size() > 0 ) { applyAppearancesToGroup( vec_product_appearances, product_switch ); } #ifdef ENABLE_OPENMP ScopedLock scoped_lock( writelock_map ); #endif map_entity_guid->insert(std::make_pair(product_guid, product_switch)); map_representations->insert( map_representation_switches.begin(), map_representation_switches.end() ); } if( thread_err.tellp() > 0 ) { #ifdef ENABLE_OPENMP ScopedLock scoped_lock( writelock_message_callback ); #endif messageCallback( thread_err.str().c_str(), StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ ); } // progress callback double progress = (double)i / (double)num_products; if( progress - m_recent_progress > 0.02 ) { #ifdef ENABLE_OPENMP if( omp_get_thread_num() == 0 ) #endif { // leave 10% of progress to openscenegraph internals progressValueCallback( progress*0.9, "scenegraph" ); m_recent_progress = progress; } } } #ifdef ENABLE_OPENMP } // implicit barrier #endif try { // now resolve spatial structure if( ifc_project_data ) { resolveProjectStructure( ifc_project_data, parent_group ); } } catch( OutOfMemoryException& e ) { throw e; } catch( BuildingException& e ) { messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" ); } catch( std::exception& e ) { messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" ); } catch( ... 
) { messageCallback( "undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ ); } progressValueCallback( 0.9, "scenegraph" ); } void addNodes( const std::map<std::string, shared_ptr<BuildingObject> >& map_shape_data, osg::ref_ptr<osg::Switch>& target_group ) { // check if there are entities that are not in spatial structure if( !target_group ) { target_group = new osg::Switch(); } for( auto it_product_shapes = map_shape_data.begin(); it_product_shapes != map_shape_data.end(); ++it_product_shapes ) { std::string product_guid = it_product_shapes->first; auto it_find = m_map_entity_guid_to_switch.find(product_guid); if( it_find != m_map_entity_guid_to_switch.end() ) { osg::ref_ptr<osg::Switch>& sw = it_find->second; if( sw ) { target_group->addChild( sw ); } } } } void resolveProjectStructure( const shared_ptr<ProductShapeData>& product_data, osg::ref_ptr<osg::Switch> group ) { if( !product_data ) { return; } if( product_data->m_ifc_object_definition.expired() ) { return; } shared_ptr<IfcObjectDefinition> object_def( product_data->m_ifc_object_definition ); std::string guid; if (object_def->m_GlobalId) { std::wstring_convert<std::codecvt_utf8<wchar_t>, wchar_t> converterX; guid = converterX.to_bytes(object_def->m_GlobalId->m_value); } if( SceneGraphUtils::inParentList(guid, group ) ) { messageCallback( "Cycle in project structure detected", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__, object_def.get() ); return; } const std::vector<shared_ptr<ProductShapeData> >& vec_children = product_data->getChildren(); for( size_t ii = 0; ii < vec_children.size(); ++ii ) { const shared_ptr<ProductShapeData>& child_product_data = vec_children[ii]; if( !child_product_data ) { continue; } osg::ref_ptr<osg::Switch> group_subparts = new osg::Switch(); if( !child_product_data->m_ifc_object_definition.expired() ) { shared_ptr<IfcObjectDefinition> child_obj_def( child_product_data->m_ifc_object_definition ); std::stringstream group_subparts_name; group_subparts_name << "#" << child_obj_def->m_entity_id << "="; group_subparts_name << child_obj_def->className(); group_subparts->setName( group_subparts_name.str().c_str() ); } group->addChild( group_subparts ); resolveProjectStructure( child_product_data, group_subparts ); } auto it_product_map = m_map_entity_guid_to_switch.find(guid); if( it_product_map != m_map_entity_guid_to_switch.end() ) { const osg::ref_ptr<osg::Switch>& product_switch = it_product_map->second; if( product_switch ) { group->addChild( product_switch ); } } else { if( group->getNumChildren() == 0 ) { osg::ref_ptr<osg::Switch> product_switch = new osg::Switch(); group->addChild( product_switch ); std::stringstream switch_name; switch_name << guid << ":" << object_def->className(); product_switch->setName( switch_name.str().c_str() ); } } } void clearAppearanceCache() { #ifdef ENABLE_OPENMP ScopedLock lock( m_writelock_appearance_cache ); #endif m_vec_existing_statesets.clear(); } void convertToOSGStateSet( const shared_ptr<AppearanceData>& appearence, osg::ref_ptr<osg::StateSet>& target_stateset ) { if( !appearence ) { return; } const float shininess = appearence->m_shininess; const float transparency = appearence->m_transparency; const bool set_transparent = appearence->m_set_transparent; const float color_ambient_r = appearence->m_color_ambient.r(); const float color_ambient_g = appearence->m_color_ambient.g(); const float color_ambient_b = appearence->m_color_ambient.b(); const float color_ambient_a = appearence->m_color_ambient.a(); const float color_diffuse_r = 
appearence->m_color_diffuse.r(); const float color_diffuse_g = appearence->m_color_diffuse.g(); const float color_diffuse_b = appearence->m_color_diffuse.b(); const float color_diffuse_a = appearence->m_color_diffuse.a(); const float color_specular_r = appearence->m_color_specular.r(); const float color_specular_g = appearence->m_color_specular.g(); const float color_specular_b = appearence->m_color_specular.b(); const float color_specular_a = appearence->m_color_specular.a(); if( m_enable_stateset_caching ) { #ifdef ENABLE_OPENMP ScopedLock lock( m_writelock_appearance_cache ); #endif for( size_t i = 0; i<m_vec_existing_statesets.size(); ++i ) { const osg::ref_ptr<osg::StateSet> stateset_existing = m_vec_existing_statesets[i]; if( !stateset_existing.valid() ) { continue; } osg::ref_ptr<osg::Material> mat_existing = (osg::Material*)stateset_existing->getAttribute( osg::StateAttribute::MATERIAL ); if( !mat_existing ) { continue; } // compare osg::Vec4f color_ambient_existing = mat_existing->getAmbient( osg::Material::FRONT_AND_BACK ); if( fabs( color_ambient_existing.r() - color_ambient_r ) > 0.03 ) break; if( fabs( color_ambient_existing.g() - color_ambient_g ) > 0.03 ) break; if( fabs( color_ambient_existing.b() - color_ambient_b ) > 0.03 ) break; if( fabs( color_ambient_existing.a() - color_ambient_a ) > 0.03 ) break; osg::Vec4f color_diffuse_existing = mat_existing->getDiffuse( osg::Material::FRONT_AND_BACK ); if( fabs( color_diffuse_existing.r() - color_diffuse_r ) > 0.03 ) break; if( fabs( color_diffuse_existing.g() - color_diffuse_g ) > 0.03 ) break; if( fabs( color_diffuse_existing.b() - color_diffuse_b ) > 0.03 ) break; if( fabs( color_diffuse_existing.a() - color_diffuse_a ) > 0.03 ) break; osg::Vec4f color_specular_existing = mat_existing->getSpecular( osg::Material::FRONT_AND_BACK ); if( fabs( color_specular_existing.r() - color_specular_r ) > 0.03 ) break; if( fabs( color_specular_existing.g() - color_specular_g ) > 0.03 ) break; if( fabs( color_specular_existing.b() - color_specular_b ) > 0.03 ) break; if( fabs( color_specular_existing.a() - color_specular_a ) > 0.03 ) break; float shininess_existing = mat_existing->getShininess( osg::Material::FRONT_AND_BACK ); if( fabs( shininess_existing - shininess ) > 0.03 ) break; bool blend_on_existing = stateset_existing->getMode( GL_BLEND ) == osg::StateAttribute::ON; if( blend_on_existing != set_transparent ) break; bool transparent_bin = stateset_existing->getRenderingHint() == osg::StateSet::TRANSPARENT_BIN; if( transparent_bin != set_transparent ) break; // if we get here, appearance is same as existing state set // TODO: block this re-used stateset for merging, or prevent merged statesets from being re-used target_stateset = stateset_existing; return; } } osg::Vec4f ambientColor( color_ambient_r, color_ambient_g, color_ambient_b, transparency ); osg::Vec4f diffuseColor( color_diffuse_r, color_diffuse_g, color_diffuse_b, transparency ); osg::Vec4f specularColor( color_specular_r, color_specular_g, color_specular_b, transparency ); // TODO: material caching and re-use osg::ref_ptr<osg::Material> mat = new osg::Material(); if( !mat ){ throw OutOfMemoryException(); } mat->setAmbient( osg::Material::FRONT_AND_BACK, ambientColor ); mat->setDiffuse( osg::Material::FRONT_AND_BACK, diffuseColor ); mat->setSpecular( osg::Material::FRONT_AND_BACK, specularColor ); mat->setShininess( osg::Material::FRONT_AND_BACK, shininess ); mat->setColorMode( osg::Material::SPECULAR ); target_stateset = new osg::StateSet(); if( !target_stateset ){ throw 
OutOfMemoryException(); } target_stateset->setAttribute( mat, osg::StateAttribute::ON ); if( appearence->m_set_transparent ) { mat->setTransparency( osg::Material::FRONT_AND_BACK, transparency ); target_stateset->setMode( GL_BLEND, osg::StateAttribute::ON ); target_stateset->setRenderingHint( osg::StateSet::TRANSPARENT_BIN ); } if( appearence->m_specular_exponent != 0.f ) { //osg::ref_ptr<osgFX::SpecularHighlights> spec_highlights = new osgFX::SpecularHighlights(); //spec_highlights->setSpecularExponent( spec->m_value ); // todo: add to scenegraph } if( m_enable_stateset_caching ) { m_vec_existing_statesets.push_back( target_stateset ); } } };
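A hedged usage sketch, not part of ConverterOSG.h: it shows how the class above might be driven once a geometry converter has produced a map of ProductShapeData keyed by GUID. The function name buildSceneGraph and the input map_shape_data are assumptions for illustration; only members visible in the header (the constructor, convertToOSG, getMapEntityGUIDToSwitch) are used, and ConverterOSG.h with its dependencies is assumed to be included.

// Sketch: convert product shapes to an OpenSceneGraph subtree and return its root switch.
#include <osg/Switch>

osg::ref_ptr<osg::Switch> buildSceneGraph(
    shared_ptr<GeometrySettings>& geom_settings,
    const std::map<std::string, shared_ptr<ProductShapeData> >& map_shape_data )
{
    ConverterOSG converter( geom_settings );
    osg::ref_ptr<osg::Switch> model_switch = new osg::Switch();

    // Creates one osg::Switch per product representation and resolves the
    // IfcProject spatial structure into model_switch.
    converter.convertToOSG( map_shape_data, model_switch );

    // Per-entity switches remain accessible afterwards, e.g. to toggle visibility by GUID:
    // converter.getMapEntityGUIDToSwitch()[some_guid]->setAllChildrenOff();
    return model_switch;
}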
convolution_3x3_pack4to1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_pack4to1_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = 4a-inch/4a-64-outch; #if __aarch64__ kernel_tm_pack4.create(8 * inch / 4, 64, outch / 8 + (outch % 8) / 4 + outch % 4, (size_t)4u * 4, 4); #else kernel_tm_pack4.create(4 * inch / 4, 64, outch / 4 + outch % 4, (size_t)4u * 4, 4); #endif int p = 0; #if __aarch64__ for (; p + 7 < outch; p += 8) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); const Mat k4 = kernel_tm.channel(p + 4); const Mat k5 = kernel_tm.channel(p + 5); const Mat k6 = kernel_tm.channel(p + 6); const Mat k7 = kernel_tm.channel(p + 7); Mat g0 = kernel_tm_pack4.channel(p / 8); for (int k = 0; k < 64; k++) { float* g00 = g0.row(k); for (int q = 0; q + 3 < inch; q += 4) { const float* k00 = k0.row(q); const float* k01 = k0.row(q + 1); const float* k02 = k0.row(q + 2); const float* k03 = k0.row(q + 3); const float* k10 = k1.row(q); const float* k11 = k1.row(q + 1); const float* k12 = k1.row(q + 2); const float* k13 = k1.row(q + 3); const float* k20 = k2.row(q); const float* k21 = k2.row(q + 1); const float* k22 = k2.row(q + 2); const float* k23 = k2.row(q + 3); const float* k30 = k3.row(q); const float* k31 = k3.row(q + 1); const float* k32 = k3.row(q + 2); const float* k33 = k3.row(q + 3); const float* k40 = k4.row(q); const float* k41 = k4.row(q + 1); const float* k42 = k4.row(q + 2); const float* k43 = k4.row(q + 3); const float* k50 = k5.row(q); const float* k51 = k5.row(q + 1); const float* k52 = k5.row(q + 2); const 
float* k53 = k5.row(q + 3); const float* k60 = k6.row(q); const float* k61 = k6.row(q + 1); const float* k62 = k6.row(q + 2); const float* k63 = k6.row(q + 3); const float* k70 = k7.row(q); const float* k71 = k7.row(q + 1); const float* k72 = k7.row(q + 2); const float* k73 = k7.row(q + 3); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k40[k]; g00[5] = k50[k]; g00[6] = k60[k]; g00[7] = k70[k]; g00[8] = k01[k]; g00[9] = k11[k]; g00[10] = k21[k]; g00[11] = k31[k]; g00[12] = k41[k]; g00[13] = k51[k]; g00[14] = k61[k]; g00[15] = k71[k]; g00[16] = k02[k]; g00[17] = k12[k]; g00[18] = k22[k]; g00[19] = k32[k]; g00[20] = k42[k]; g00[21] = k52[k]; g00[22] = k62[k]; g00[23] = k72[k]; g00[24] = k03[k]; g00[25] = k13[k]; g00[26] = k23[k]; g00[27] = k33[k]; g00[28] = k43[k]; g00[29] = k53[k]; g00[30] = k63[k]; g00[31] = k73[k]; g00 += 32; } } } #endif // __aarch64__ for (; p + 3 < outch; p += 4) { const Mat k0 = kernel_tm.channel(p); const Mat k1 = kernel_tm.channel(p + 1); const Mat k2 = kernel_tm.channel(p + 2); const Mat k3 = kernel_tm.channel(p + 3); #if __aarch64__ Mat g0 = kernel_tm_pack4.channel(p / 8 + (p % 8) / 4); #else Mat g0 = kernel_tm_pack4.channel(p / 4); #endif for (int k = 0; k < 64; k++) { float* g00 = g0.row(k); for (int q = 0; q + 3 < inch; q += 4) { const float* k00 = k0.row(q); const float* k01 = k0.row(q + 1); const float* k02 = k0.row(q + 2); const float* k03 = k0.row(q + 3); const float* k10 = k1.row(q); const float* k11 = k1.row(q + 1); const float* k12 = k1.row(q + 2); const float* k13 = k1.row(q + 3); const float* k20 = k2.row(q); const float* k21 = k2.row(q + 1); const float* k22 = k2.row(q + 2); const float* k23 = k2.row(q + 3); const float* k30 = k3.row(q); const float* k31 = k3.row(q + 1); const float* k32 = k3.row(q + 2); const float* k33 = k3.row(q + 3); g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k01[k]; g00[5] = k11[k]; g00[6] = k21[k]; g00[7] = k31[k]; g00[8] = k02[k]; g00[9] = k12[k]; g00[10] = k22[k]; g00[11] = k32[k]; g00[12] = k03[k]; g00[13] = k13[k]; g00[14] = k23[k]; g00[15] = k33[k]; g00 += 16; } } } for (; p < outch; p++) { const Mat k0 = kernel_tm.channel(p); #if __aarch64__ Mat g0 = kernel_tm_pack4.channel(p / 8 + (p % 8) / 4 + p % 4); #else Mat g0 = kernel_tm_pack4.channel(p / 4 + p % 4); #endif for (int k = 0; k < 64; k++) { float* g00 = g0.row(k); for (int q = 0; q + 3 < inch; q += 4) { const float* k00 = k0.row(q); const float* k01 = k0.row(q + 1); const float* k02 = k0.row(q + 2); const float* k03 = k0.row(q + 3); g00[0] = k00[k]; g00[1] = k01[k]; g00[2] = k02[k]; g00[3] = k03[k]; g00 += 4; } } } } static void conv3x3s1_winograd64_pack4to1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); // const float itm[8][8] = { // 
{1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8][4]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const float* r0 = img0.row(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r03 = vld1q_f32(r0 + 12); float32x4_t _r04 = vld1q_f32(r0 + 16); float32x4_t _r05 = vld1q_f32(r0 + 20); float32x4_t _r06 = vld1q_f32(r0 + 24); float32x4_t _r07 = vld1q_f32(r0 + 28); float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f); float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f); vst1q_f32(tmp[0][m], _tmp0m); vst1q_f32(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(tmp[1][m], _tmp1m); vst1q_f32(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f); float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(tmp[3][m], _tmp3m); vst1q_f32(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(tmp[5][m], _tmp5m); vst1q_f32(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 4; } float* r0_tm_0 = 
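// NOTE (added comment): the first pass above transformed the 8 rows of the tile into
// tmp[8][8][4]; the pass below applies the same 1-D transform to the columns of tmp and
// scatters the 64 results of this tile across the 64 "frequency" planes of img0_tm via
// the r0_tm_0..r0_tm_7 pointers (adjacent planes are tiles * 4 floats apart).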
(float*)img0_tm + (i * w_tm / 8 + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 8; float* r0_tm_3 = r0_tm_0 + tiles * 12; float* r0_tm_4 = r0_tm_0 + tiles * 16; float* r0_tm_5 = r0_tm_0 + tiles * 20; float* r0_tm_6 = r0_tm_0 + tiles * 24; float* r0_tm_7 = r0_tm_0 + tiles * 28; for (int m = 0; m < 8; m++) { float32x4_t _tmp00 = vld1q_f32(tmp[m][0]); float32x4_t _tmp01 = vld1q_f32(tmp[m][1]); float32x4_t _tmp02 = vld1q_f32(tmp[m][2]); float32x4_t _tmp03 = vld1q_f32(tmp[m][3]); float32x4_t _tmp04 = vld1q_f32(tmp[m][4]); float32x4_t _tmp05 = vld1q_f32(tmp[m][5]); float32x4_t _tmp06 = vld1q_f32(tmp[m][6]); float32x4_t _tmp07 = vld1q_f32(tmp[m][7]); float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f); float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f); float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f); float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; vst1q_f32(r0_tm_0, _r0tm0); vst1q_f32(r0_tm_1, _r0tm1); vst1q_f32(r0_tm_2, _r0tm2); vst1q_f32(r0_tm_3, _r0tm3); vst1q_f32(r0_tm_4, _r0tm4); vst1q_f32(r0_tm_5, _r0tm5); vst1q_f32(r0_tm_6, _r0tm6); vst1q_f32(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 32; r0_tm_1 += tiles * 32; r0_tm_2 += tiles * 32; r0_tm_3 += tiles * 32; r0_tm_4 += tiles * 32; r0_tm_5 += tiles * 32; r0_tm_6 += tiles * 32; r0_tm_7 += tiles * 32; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __aarch64__ if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + tiles % 12 % 4, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator); 
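// NOTE (added comment): the row sizes chosen above (and in the 1-tile fallback below)
// mirror how the multiply stage walks tiles in blocks of 12/8/4/1 on aarch64 and
// 8/4/1 on armv7; bottom_blob_tm2 keeps each block of tiles contiguous across the
// input channels so the GEMM micro-kernels can stream it with sequential loads.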
else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, elemsize, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { float* tm2p = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0] \n" "sub %0, %0, #128 \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v18.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v19.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19"); r0 += bottom_blob_tm.cstep * 4; } } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8); #else float* tm2p = tm2.row(i / 8); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n" "sub %0, %0, #64 \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #256] \n" "vld4.f32 {d0-d3}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d4-d7}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d16-d19}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d20-d23}, [%0 :128] \n" "sub %0, %0, #96 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" "vst1.f32 {d16-d17}, [%1 :128]! \n" "vst1.f32 {d4-d5}, [%1 :128]! \n" "vst1.f32 {d20-d21}, [%1 :128]! \n" "vst1.f32 {d2-d3}, [%1 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "vst1.f32 {d6-d7}, [%1 :128]! \n" "vst1.f32 {d22-d23}, [%1 :128]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"); #endif r0 += bottom_blob_tm.cstep * 4; } } for (; i + 3 < tiles; i += 4) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #256] \n" "vld4.f32 {d0-d3}, [%0 :128]! \n" "pld [%0, #256] \n" "vld4.f32 {d4-d7}, [%0 :128] \n" "sub %0, %0, #32 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" "vst1.f32 {d4-d5}, [%1 :128]! \n" "vst1.f32 {d2-d3}, [%1 :128]! \n" "vst1.f32 {d6-d7}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } for (; i < tiles; i++) { #if __aarch64__ float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); #else float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + i % 4); #endif const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 4u, 1, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; #if __aarch64__ nn_outch = outch >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); float* output2_tm = top_blob_tm.channel(p + 2); float* output3_tm = top_blob_tm.channel(p + 3); float* output4_tm = top_blob_tm.channel(p + 4); float* output5_tm = top_blob_tm.channel(p + 5); float* output6_tm = top_blob_tm.channel(p + 6); float* output7_tm = top_blob_tm.channel(p + 7); const Mat kernel01_tm = kernel_tm.channel(p / 8); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, 
v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v11.4s, v0.4s, v4.s[1] \n" "fmla v14.4s, v0.4s, v4.s[2] \n" "fmla v17.4s, v0.4s, v4.s[3] \n" "fmla v20.4s, v0.4s, v5.s[0] \n" "fmla v23.4s, v0.4s, v5.s[1] \n" "fmla v26.4s, v0.4s, v5.s[2] \n" "fmla v29.4s, v0.4s, v5.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v12.4s, v1.4s, v4.s[1] \n" "fmla v15.4s, v1.4s, v4.s[2] \n" "fmla v18.4s, v1.4s, v4.s[3] \n" "fmla v21.4s, v1.4s, v5.s[0] \n" "fmla v24.4s, v1.4s, v5.s[1] \n" "fmla v27.4s, v1.4s, v5.s[2] \n" "fmla v30.4s, v1.4s, v5.s[3] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "fmla v13.4s, v2.4s, v4.s[1] \n" "fmla v16.4s, v2.4s, v4.s[2] \n" "fmla v19.4s, v2.4s, v4.s[3] \n" "fmla v22.4s, v2.4s, v5.s[0] \n" "fmla v25.4s, v2.4s, v5.s[1] \n" "fmla v28.4s, v2.4s, v5.s[2] \n" "fmla v31.4s, v2.4s, v5.s[3] \n" "fmla v8.4s, v3.4s, v6.s[0] \n" "fmla v11.4s, v3.4s, v6.s[1] \n" "fmla v14.4s, v3.4s, v6.s[2] \n" "fmla v17.4s, v3.4s, v6.s[3] \n" "fmla v20.4s, v3.4s, v7.s[0] \n" "fmla v23.4s, v3.4s, v7.s[1] \n" "fmla v26.4s, v3.4s, v7.s[2] \n" "fmla v29.4s, v3.4s, v7.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v9.4s, v0.4s, v6.s[0] \n" "fmla v12.4s, v0.4s, v6.s[1] \n" "fmla v15.4s, v0.4s, v6.s[2] \n" "fmla v18.4s, v0.4s, v6.s[3] \n" "fmla v21.4s, v0.4s, v7.s[0] \n" "fmla v24.4s, v0.4s, v7.s[1] \n" "fmla v27.4s, v0.4s, v7.s[2] \n" "fmla v30.4s, v0.4s, v7.s[3] \n" "fmla v10.4s, v1.4s, v6.s[0] \n" "fmla v13.4s, v1.4s, v6.s[1] \n" "fmla v16.4s, v1.4s, v6.s[2] \n" "fmla v19.4s, v1.4s, v6.s[3] \n" "fmla v22.4s, v1.4s, v7.s[0] \n" "fmla v25.4s, v1.4s, v7.s[1] \n" "fmla v28.4s, v1.4s, v7.s[2] \n" "fmla v31.4s, v1.4s, v7.s[3] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "fmla v8.4s, v2.4s, v4.s[0] \n" "fmla v11.4s, v2.4s, v4.s[1] \n" "fmla v14.4s, v2.4s, v4.s[2] \n" "fmla v17.4s, v2.4s, v4.s[3] \n" "fmla v20.4s, v2.4s, v5.s[0] \n" "fmla v23.4s, v2.4s, v5.s[1] \n" "fmla v26.4s, v2.4s, v5.s[2] \n" "fmla v29.4s, v2.4s, v5.s[3] \n" "fmla v9.4s, v3.4s, v4.s[0] \n" "fmla v12.4s, v3.4s, v4.s[1] \n" "fmla v15.4s, v3.4s, v4.s[2] \n" "fmla v18.4s, v3.4s, v4.s[3] \n" "fmla v21.4s, v3.4s, v5.s[0] \n" "fmla v24.4s, v3.4s, v5.s[1] \n" "fmla v27.4s, v3.4s, v5.s[2] \n" "fmla v30.4s, v3.4s, v5.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v10.4s, v0.4s, v4.s[0] \n" "fmla v13.4s, v0.4s, v4.s[1] \n" "fmla v16.4s, v0.4s, v4.s[2] \n" "fmla v19.4s, v0.4s, v4.s[3] \n" "fmla v22.4s, v0.4s, v5.s[0] \n" "fmla v25.4s, v0.4s, v5.s[1] \n" "fmla v28.4s, v0.4s, v5.s[2] \n" "fmla v31.4s, v0.4s, v5.s[3] \n" "fmla v8.4s, v1.4s, v6.s[0] \n" "fmla v11.4s, v1.4s, v6.s[1] \n" "fmla v14.4s, v1.4s, v6.s[2] \n" "fmla v17.4s, v1.4s, v6.s[3] \n" "fmla v20.4s, v1.4s, v7.s[0] \n" "fmla v23.4s, v1.4s, v7.s[1] \n" "fmla v26.4s, v1.4s, v7.s[2] \n" "fmla v29.4s, v1.4s, v7.s[3] \n" "fmla v9.4s, v2.4s, v6.s[0] \n" "fmla v12.4s, v2.4s, v6.s[1] \n" "fmla v15.4s, v2.4s, v6.s[2] \n" "fmla v18.4s, v2.4s, v6.s[3] \n" "fmla v21.4s, v2.4s, v7.s[0] \n" "fmla v24.4s, v2.4s, v7.s[1] \n" "fmla v27.4s, v2.4s, v7.s[2] \n" "fmla v30.4s, v2.4s, v7.s[3] \n" "fmla v10.4s, v3.4s, v6.s[0] \n" "fmla v13.4s, v3.4s, v6.s[1] \n" "fmla v16.4s, v3.4s, v6.s[2] \n" 
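// NOTE (added comment): in this 12-tile x 8-output-channel micro-kernel v8..v31 are the
// accumulators (3 vectors of 4 tiles per output channel), v0..v3 stream the permuted tile
// data and v4..v7 the packed weights; each fmla broadcasts one weight lane over 4 tiles,
// and one pass of the "0:" loop consumes one pack-4 input channel.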
"fmla v19.4s, v3.4s, v6.s[3] \n" "fmla v22.4s, v3.4s, v7.s[0] \n" "fmla v25.4s, v3.4s, v7.s[1] \n" "fmla v28.4s, v3.4s, v7.s[2] \n" "fmla v31.4s, v3.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n" "st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n" "st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n" "st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n" "st1 {v20.4s, v21.4s, v22.4s}, [%5], #48 \n" "st1 {v23.4s, v24.4s, v25.4s}, [%6], #48 \n" "st1 {v26.4s, v27.4s, v28.4s}, [%7], #48 \n" "st1 {v29.4s, v30.4s, v31.4s}, [%8], #48 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v0.4s, v4.s[0] \n" "fmla v18.4s, v0.4s, v4.s[1] \n" "fmla v20.4s, v0.4s, v4.s[2] \n" "fmla v22.4s, v0.4s, v4.s[3] \n" "fmla v24.4s, v0.4s, v5.s[0] \n" "fmla v26.4s, v0.4s, v5.s[1] \n" "fmla v28.4s, v0.4s, v5.s[2] \n" "fmla v30.4s, v0.4s, v5.s[3] \n" "fmla v17.4s, v1.4s, v4.s[0] \n" "fmla v19.4s, v1.4s, v4.s[1] \n" "fmla v21.4s, v1.4s, v4.s[2] \n" "fmla v23.4s, v1.4s, v4.s[3] \n" "fmla v25.4s, v1.4s, v5.s[0] \n" "fmla v27.4s, v1.4s, v5.s[1] \n" "fmla v29.4s, v1.4s, v5.s[2] \n" "fmla v31.4s, v1.4s, v5.s[3] \n" "fmla v16.4s, v2.4s, v6.s[0] \n" "fmla v18.4s, v2.4s, v6.s[1] \n" "fmla v20.4s, v2.4s, v6.s[2] \n" "fmla v22.4s, v2.4s, v6.s[3] \n" "fmla v24.4s, v2.4s, v7.s[0] \n" "fmla v26.4s, v2.4s, v7.s[1] \n" "fmla v28.4s, v2.4s, v7.s[2] \n" "fmla v30.4s, v2.4s, v7.s[3] \n" "fmla v17.4s, v3.4s, v6.s[0] \n" "fmla v19.4s, v3.4s, v6.s[1] \n" "fmla v21.4s, v3.4s, v6.s[2] \n" "fmla v23.4s, v3.4s, v6.s[3] \n" "fmla v25.4s, v3.4s, v7.s[0] \n" "fmla v27.4s, v3.4s, v7.s[1] \n" "fmla v29.4s, v3.4s, v7.s[2] \n" "fmla v31.4s, v3.4s, v7.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n" "fmla v16.4s, v12.4s, v8.s[0] \n" "fmla v18.4s, v12.4s, v8.s[1] \n" "fmla v20.4s, v12.4s, v8.s[2] \n" "fmla v22.4s, v12.4s, v8.s[3] \n" "fmla v24.4s, v12.4s, v9.s[0] \n" "fmla v26.4s, v12.4s, v9.s[1] \n" "fmla v28.4s, 
v12.4s, v9.s[2] \n" "fmla v30.4s, v12.4s, v9.s[3] \n" "fmla v17.4s, v13.4s, v8.s[0] \n" "fmla v19.4s, v13.4s, v8.s[1] \n" "fmla v21.4s, v13.4s, v8.s[2] \n" "fmla v23.4s, v13.4s, v8.s[3] \n" "fmla v25.4s, v13.4s, v9.s[0] \n" "fmla v27.4s, v13.4s, v9.s[1] \n" "fmla v29.4s, v13.4s, v9.s[2] \n" "fmla v31.4s, v13.4s, v9.s[3] \n" "fmla v16.4s, v14.4s, v10.s[0] \n" "fmla v18.4s, v14.4s, v10.s[1] \n" "fmla v20.4s, v14.4s, v10.s[2] \n" "fmla v22.4s, v14.4s, v10.s[3] \n" "fmla v24.4s, v14.4s, v11.s[0] \n" "fmla v26.4s, v14.4s, v11.s[1] \n" "fmla v28.4s, v14.4s, v11.s[2] \n" "fmla v30.4s, v14.4s, v11.s[3] \n" "fmla v17.4s, v15.4s, v10.s[0] \n" "fmla v19.4s, v15.4s, v10.s[1] \n" "fmla v21.4s, v15.4s, v10.s[2] \n" "fmla v23.4s, v15.4s, v10.s[3] \n" "fmla v25.4s, v15.4s, v11.s[0] \n" "fmla v27.4s, v15.4s, v11.s[1] \n" "fmla v29.4s, v15.4s, v11.s[2] \n" "fmla v31.4s, v15.4s, v11.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" "st1 {v20.4s, v21.4s}, [%3], #32 \n" "st1 {v22.4s, v23.4s}, [%4], #32 \n" "st1 {v24.4s, v25.4s}, [%5], #32 \n" "st1 {v26.4s, v27.4s}, [%6], #32 \n" "st1 {v28.4s, v29.4s}, [%7], #32 \n" "st1 {v30.4s, v31.4s}, [%8], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v0.4s, v4.s[0] \n" "fmla v17.4s, v0.4s, v4.s[1] \n" "fmla v18.4s, v0.4s, v4.s[2] \n" "fmla v19.4s, v0.4s, v4.s[3] \n" "fmla v20.4s, v0.4s, v5.s[0] \n" "fmla v21.4s, v0.4s, v5.s[1] \n" "fmla v22.4s, v0.4s, v5.s[2] \n" "fmla v23.4s, v0.4s, v5.s[3] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n" "fmla v16.4s, v1.4s, v6.s[0] \n" "fmla v17.4s, v1.4s, v6.s[1] \n" "fmla v18.4s, v1.4s, v6.s[2] \n" "fmla v19.4s, v1.4s, v6.s[3] \n" "fmla v20.4s, v1.4s, v7.s[0] \n" "fmla v21.4s, v1.4s, v7.s[1] \n" "fmla v22.4s, v1.4s, v7.s[2] \n" "fmla v23.4s, v1.4s, v7.s[3] \n" "fmla v16.4s, v2.4s, v8.s[0] \n" "fmla v17.4s, v2.4s, v8.s[1] \n" "fmla v18.4s, v2.4s, v8.s[2] \n" "fmla v19.4s, v2.4s, v8.s[3] \n" "fmla v20.4s, v2.4s, v9.s[0] \n" "fmla v21.4s, v2.4s, v9.s[1] \n" "fmla v22.4s, v2.4s, v9.s[2] \n" "fmla v23.4s, v2.4s, v9.s[3] \n" "fmla v16.4s, v3.4s, v10.s[0] \n" "fmla v17.4s, v3.4s, v10.s[1] \n" "fmla v18.4s, v3.4s, v10.s[2] \n" "fmla v19.4s, v3.4s, v10.s[3] \n" "fmla v20.4s, v3.4s, v11.s[0] \n" 
"fmla v21.4s, v3.4s, v11.s[1] \n" "fmla v22.4s, v3.4s, v11.s[2] \n" "fmla v23.4s, v3.4s, v11.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" "st1 {v18.4s}, [%3], #16 \n" "st1 {v19.4s}, [%4], #16 \n" "st1 {v20.4s}, [%5], #16 \n" "st1 {v21.4s}, [%6], #16 \n" "st1 {v22.4s}, [%7], #16 \n" "st1 {v23.4s}, [%8], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v0.4s}, [%9], #16 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v5.4s, v0.s[0] \n" "fmla v18.4s, v6.4s, v0.s[1] \n" "fmla v19.4s, v7.4s, v0.s[1] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n" "fmla v16.4s, v8.4s, v0.s[2] \n" "fmla v17.4s, v9.4s, v0.s[2] \n" "fmla v18.4s, v10.4s, v0.s[3] \n" "fmla v19.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "st1 {v16.s}[0], [%1], #4 \n" "st1 {v16.s}[1], [%2], #4 \n" "st1 {v16.s}[2], [%3], #4 \n" "st1 {v16.s}[3], [%4], #4 \n" "st1 {v17.s}[0], [%5], #4 \n" "st1 {v17.s}[1], [%6], #4 \n" "st1 {v17.s}[2], [%7], #4 \n" "st1 {v17.s}[3], [%8], #4 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); } } } remain_outch_start += nn_outch << 3; nn_outch = (outch - remain_outch_start) >> 2; #else // __aarch64__ nn_outch = outch >> 2; #endif // __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p + 1); float* output2_tm = top_blob_tm.channel(p + 2); float* output3_tm = top_blob_tm.channel(p + 3); #if __aarch64__ const Mat kernel01_tm = kernel_tm.channel(p / 8 + (p % 8) / 4); #else const Mat kernel01_tm = kernel_tm.channel(p / 4); #endif for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, 
v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v11.4s, v0.4s, v4.s[1] \n" "fmla v14.4s, v0.4s, v4.s[2] \n" "fmla v17.4s, v0.4s, v4.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v12.4s, v1.4s, v4.s[1] \n" "fmla v15.4s, v1.4s, v4.s[2] \n" "fmla v18.4s, v1.4s, v4.s[3] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "fmla v13.4s, v2.4s, v4.s[1] \n" "fmla v16.4s, v2.4s, v4.s[2] \n" "fmla v19.4s, v2.4s, v4.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%5], #64 \n" "fmla v8.4s, v3.4s, v5.s[0] \n" "fmla v11.4s, v3.4s, v5.s[1] \n" "fmla v14.4s, v3.4s, v5.s[2] \n" "fmla v17.4s, v3.4s, v5.s[3] \n" "fmla v9.4s, v20.4s, v5.s[0] \n" "fmla v12.4s, v20.4s, v5.s[1] \n" "fmla v15.4s, v20.4s, v5.s[2] \n" "fmla v18.4s, v20.4s, v5.s[3] \n" "fmla v10.4s, v21.4s, v5.s[0] \n" "fmla v13.4s, v21.4s, v5.s[1] \n" "fmla v16.4s, v21.4s, v5.s[2] \n" "fmla v19.4s, v21.4s, v5.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%5], #64 \n" "fmla v8.4s, v22.4s, v6.s[0] \n" "fmla v11.4s, v22.4s, v6.s[1] \n" "fmla v14.4s, v22.4s, v6.s[2] \n" "fmla v17.4s, v22.4s, v6.s[3] \n" "fmla v9.4s, v23.4s, v6.s[0] \n" "fmla v12.4s, v23.4s, v6.s[1] \n" "fmla v15.4s, v23.4s, v6.s[2] \n" "fmla v18.4s, v23.4s, v6.s[3] \n" "fmla v10.4s, v24.4s, v6.s[0] \n" "fmla v13.4s, v24.4s, v6.s[1] \n" "fmla v16.4s, v24.4s, v6.s[2] \n" "fmla v19.4s, v24.4s, v6.s[3] \n" "fmla v8.4s, v25.4s, v7.s[0] \n" "fmla v11.4s, v25.4s, v7.s[1] \n" "fmla v14.4s, v25.4s, v7.s[2] \n" "fmla v17.4s, v25.4s, v7.s[3] \n" "fmla v9.4s, v26.4s, v7.s[0] \n" "fmla v12.4s, v26.4s, v7.s[1] \n" "fmla v15.4s, v26.4s, v7.s[2] \n" "fmla v18.4s, v26.4s, v7.s[3] \n" "fmla v10.4s, v27.4s, v7.s[0] \n" "fmla v13.4s, v27.4s, v7.s[1] \n" "fmla v16.4s, v27.4s, v7.s[2] \n" "fmla v19.4s, v27.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n" "st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n" "st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n" "st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif // __aarch64__ for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, 
v2.4s, v3.4s}, [%5], #64 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v10.4s, v0.4s, v4.s[1] \n" "fmla v12.4s, v0.4s, v4.s[2] \n" "fmla v14.4s, v0.4s, v4.s[3] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v11.4s, v1.4s, v4.s[1] \n" "fmla v13.4s, v1.4s, v4.s[2] \n" "fmla v15.4s, v1.4s, v4.s[3] \n" "fmla v8.4s, v2.4s, v5.s[0] \n" "fmla v10.4s, v2.4s, v5.s[1] \n" "fmla v12.4s, v2.4s, v5.s[2] \n" "fmla v14.4s, v2.4s, v5.s[3] \n" "fmla v9.4s, v3.4s, v5.s[0] \n" "fmla v11.4s, v3.4s, v5.s[1] \n" "fmla v13.4s, v3.4s, v5.s[2] \n" "fmla v15.4s, v3.4s, v5.s[3] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%5], #64 \n" "fmla v8.4s, v16.4s, v6.s[0] \n" "fmla v10.4s, v16.4s, v6.s[1] \n" "fmla v12.4s, v16.4s, v6.s[2] \n" "fmla v14.4s, v16.4s, v6.s[3] \n" "fmla v9.4s, v17.4s, v6.s[0] \n" "fmla v11.4s, v17.4s, v6.s[1] \n" "fmla v13.4s, v17.4s, v6.s[2] \n" "fmla v15.4s, v17.4s, v6.s[3] \n" "fmla v8.4s, v18.4s, v7.s[0] \n" "fmla v10.4s, v18.4s, v7.s[1] \n" "fmla v12.4s, v18.4s, v7.s[2] \n" "fmla v14.4s, v18.4s, v7.s[3] \n" "fmla v9.4s, v19.4s, v7.s[0] \n" "fmla v11.4s, v19.4s, v7.s[1] \n" "fmla v13.4s, v19.4s, v7.s[2] \n" "fmla v15.4s, v19.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "st1 {v10.4s, v11.4s}, [%2], #32 \n" "st1 {v12.4s, v13.4s}, [%3], #32 \n" "st1 {v14.4s, v15.4s}, [%4], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "veor q12, q12 \n" "veor q13, q13 \n" "veor q14, q14 \n" "veor q15, q15 \n" "0: \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" "pld [%6, #512] \n" "vldm %6!, {d8-d15} \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q10, q0, d8[1] \n" "vmla.f32 q12, q0, d9[0] \n" "vmla.f32 q14, q0, d9[1] \n" "vmla.f32 q9, q1, d8[0] \n" "vmla.f32 q11, q1, d8[1] \n" "vmla.f32 q13, q1, d9[0] \n" "vmla.f32 q15, q1, d9[1] \n" "vmla.f32 q8, q2, d10[0] \n" "vmla.f32 q10, q2, d10[1] \n" "vmla.f32 q12, q2, d11[0] \n" "vmla.f32 q14, q2, d11[1] \n" "vmla.f32 q9, q3, d10[0] \n" "vmla.f32 q11, q3, d10[1] \n" "vmla.f32 q13, q3, d11[0] \n" "vmla.f32 q15, q3, d11[1] \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" "vmla.f32 q8, q0, d12[0] \n" "vmla.f32 q10, q0, d12[1] \n" "vmla.f32 q12, q0, d13[0] \n" "vmla.f32 q14, q0, d13[1] \n" "vmla.f32 q9, q1, d12[0] \n" "vmla.f32 q11, q1, d12[1] \n" "vmla.f32 q13, q1, d13[0] \n" "vmla.f32 q15, q1, d13[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q2, d14[0] \n" "vmla.f32 q10, q2, d14[1] \n" "vmla.f32 q12, q2, d15[0] \n" "vmla.f32 q14, q2, d15[1] \n" "vmla.f32 q9, q3, d14[0] \n" "vmla.f32 q11, q3, d14[1] \n" "vmla.f32 q13, q3, d15[0] \n" "vmla.f32 q15, q3, d15[1] \n" "bne 0b \n" "vst1.f32 {d16-d19}, [%1]! \n" "vst1.f32 {d20-d23}, [%2]! \n" "vst1.f32 {d24-d27}, [%3]! \n" "vst1.f32 {d28-d31}, [%4]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v0.4s, v4.s[1] \n" "fmla v10.4s, v0.4s, v4.s[2] \n" "fmla v11.4s, v0.4s, v4.s[3] \n" "fmla v8.4s, v1.4s, v5.s[0] \n" "fmla v9.4s, v1.4s, v5.s[1] \n" "fmla v10.4s, v1.4s, v5.s[2] \n" "fmla v11.4s, v1.4s, v5.s[3] \n" "fmla v8.4s, v2.4s, v6.s[0] \n" "fmla v9.4s, v2.4s, v6.s[1] \n" "fmla v10.4s, v2.4s, v6.s[2] \n" "fmla v11.4s, v2.4s, v6.s[3] \n" "fmla v8.4s, v3.4s, v7.s[0] \n" "fmla v9.4s, v3.4s, v7.s[1] \n" "fmla v10.4s, v3.4s, v7.s[2] \n" "fmla v11.4s, v3.4s, v7.s[3] \n" "bne 0b \n" "st1 {v8.4s}, [%1], #16 \n" "st1 {v9.4s}, [%2], #16 \n" "st1 {v10.4s}, [%3], #16 \n" "st1 {v11.4s}, [%4], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" "pld [%6, #512] \n" "vldm %6!, {d8-d15} \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q0, d8[1] \n" "vmla.f32 q10, q0, d9[0] \n" "vmla.f32 q11, q0, d9[1] \n" "vmla.f32 q8, q1, d10[0] \n" "vmla.f32 q9, q1, d10[1] \n" "vmla.f32 q10, q1, d11[0] \n" "vmla.f32 q11, q1, d11[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q2, d12[0] \n" "vmla.f32 q9, q2, d12[1] \n" "vmla.f32 q10, q2, d13[0] \n" "vmla.f32 q11, q2, d13[1] \n" "vmla.f32 q8, q3, d14[0] \n" "vmla.f32 q9, q3, d14[1] \n" "vmla.f32 q10, q3, d15[0] \n" "vmla.f32 q11, q3, d15[1] \n" "bne 0b \n" "vst1.f32 {d16-d17}, [%1]! \n" "vst1.f32 {d18-d19}, [%2]! \n" "vst1.f32 {d20-d21}, [%3]! \n" "vst1.f32 {d22-d23}, [%4]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif // __aarch64__ } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4); #endif const float* kptr = kernel01_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[1] \n" "fmla v10.4s, v6.4s, v0.s[2] \n" "fmla v11.4s, v7.4s, v0.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "st1 {v8.s}[0], [%1], #4 \n" "st1 {v8.s}[1], [%2], #4 \n" "st1 {v8.s}[2], [%3], #4 \n" "st1 {v8.s}[3], [%4], #4 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5]! \n" "pld [%6, #512] \n" "vldm %6!, {d8-d15} \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q7, d1[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vst1.f32 {d16[0]}, [%1]! \n" "vst1.f32 {d16[1]}, [%2]! \n" "vst1.f32 {d17[0]}, [%3]! \n" "vst1.f32 {d17[1]}, [%4]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(r0), // %5 "=r"(kptr) // %6 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(r0), "6"(kptr) : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif // __aarch64__ } } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); #if __aarch64__ const Mat kernel0_tm = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4); #else const Mat kernel0_tm = kernel_tm.channel(p / 4 + p % 4); #endif for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* kptr = kernel0_tm.row(r); int nn = inch; // inch always > 0 asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v5.16b, v5.16b, v5.16b \n" "eor v6.16b, v6.16b, v6.16b \n" "eor v7.16b, v7.16b, v7.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3], #16 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v10.4s, v2.4s, v4.s[0] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n" "fmla v5.4s, v3.4s, v4.s[1] \n" "fmla v6.4s, v12.4s, v4.s[1] \n" "fmla v7.4s, v13.4s, v4.s[1] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%2], #64 \n" "fmla v8.4s, v14.4s, v4.s[2] \n" "fmla v9.4s, v15.4s, v4.s[2] \n" "fmla v10.4s, v16.4s, v4.s[2] \n" "fmla v5.4s, v17.4s, v4.s[3] \n" "fmla v6.4s, v18.4s, v4.s[3] \n" "fmla v7.4s, v19.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v5.4s \n" "fadd v9.4s, v9.4s, v6.4s \n" "fadd v10.4s, v10.4s, v7.4s \n" "st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } #endif for (; i + 7 < tiles; i += 8) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8); #else const float* r0 = bb2.row(i / 8); #endif const float* kptr = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3], #16 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[0] \n" "fmla v10.4s, v2.4s, v4.s[1] \n" "fmla v11.4s, v3.4s, v4.s[1] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n" "fmla v8.4s, v12.4s, v4.s[2] \n" "fmla v9.4s, v13.4s, v4.s[2] \n" "fmla v10.4s, v14.4s, v4.s[3] \n" "fmla v11.4s, v15.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v10.4s \n" "fadd v9.4s, v9.4s, v11.4s \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); #else // 
__aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #128] \n" "vld1.f32 {d8-d9}, [%3]! \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[0] \n" "vmla.f32 q10, q2, d8[1] \n" "vmla.f32 q11, q3, d8[1] \n" "pld [%2, #512] \n" "vldm %2!, {d24-d31} \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q12, d9[0] \n" "vmla.f32 q9, q13, d9[0] \n" "vmla.f32 q10, q14, d9[1] \n" "vmla.f32 q11, q15, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q10 \n" "vadd.f32 q9, q9, q11 \n" "vst1.f32 {d16-d19}, [%1]! \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ } for (; i + 3 < tiles; i += 4) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4); #endif const float* kptr = kernel0_tm.row(r); int nn = inch; // inch always > 0 #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v4.4s}, [%3], #16 \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v0.4s, v4.s[0] \n" "fmla v9.4s, v1.4s, v4.s[1] \n" "fmla v10.4s, v2.4s, v4.s[2] \n" "fmla v11.4s, v3.4s, v4.s[3] \n" "bne 0b \n" "fadd v8.4s, v8.4s, v9.4s \n" "fadd v10.4s, v10.4s, v11.4s \n" "fadd v8.4s, v8.4s, v10.4s \n" "st1 {v8.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "veor q8, q8 \n" "veor q9, q9 \n" "veor q10, q10 \n" "veor q11, q11 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #128] \n" "vld1.f32 {d8-d9}, [%3]! \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q0, d8[0] \n" "vmla.f32 q9, q1, d8[1] \n" "vmla.f32 q10, q2, d9[0] \n" "vmla.f32 q11, q3, d9[1] \n" "bne 0b \n" "vadd.f32 q8, q8, q9 \n" "vadd.f32 q10, q10, q11 \n" "vadd.f32 q8, q8, q10 \n" "vst1.f32 {d16-d17}, [%1]! 
\n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11"); #endif // __aarch64__ } for (; i < tiles; i++) { #if __aarch64__ const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4); #else const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4); #endif const float* kptr = kernel0_tm.row(r); float32x4_t _sum0 = vdupq_n_f32(0.f); for (int q = 0; q < inch; q++) { float32x4_t _r0 = vld1q_f32(r0); float32x4_t _k0 = vld1q_f32(kptr); _sum0 = vmlaq_f32(_sum0, _r0, _k0); kptr += 4; r0 += 4; } #if __aarch64__ float sum0 = vaddvq_f32(_sum0); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float32x2_t _ss2 = vpadd_f32(_ss, _ss); float sum0 = vget_lane_f32(_ss2, 0); #endif output0_tm[0] = sum0; output0_tm++; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 4u, 1, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; // float32x2_t _bias0 = vdup_n_f32(bias0); float tmp[6][8]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, 4u, 1, opt.workspace_allocator); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 1; const float* output0_tm_1 = output0_tm_0 + tiles * 1; const float* output0_tm_2 = output0_tm_0 + tiles * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 5; const float* output0_tm_6 = output0_tm_0 + tiles * 6; const float* output0_tm_7 = output0_tm_0 + tiles * 7; // TODO neon optimize for (int m = 0; m < 8; m++) { float tmp024a = output0_tm_1[0] + output0_tm_2[0]; float tmp135a = output0_tm_1[0] - output0_tm_2[0]; float tmp024b = output0_tm_3[0] + output0_tm_4[0]; float tmp135b = output0_tm_3[0] - output0_tm_4[0]; float tmp024c = output0_tm_5[0] + output0_tm_6[0]; float tmp135c = output0_tm_5[0] - output0_tm_6[0]; tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 8; output0_tm_1 += tiles * 8; output0_tm_2 += tiles * 8; output0_tm_3 += tiles * 8; output0_tm_4 += tiles * 8; output0_tm_5 += tiles * 8; output0_tm_6 += tiles * 8; output0_tm_7 += tiles * 8; } float* output0 = out0.row(i * 6) + j * 6; for (int m = 0; m < 6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_pack4to1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ int nn_outch = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p + 1); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? 
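// NOTE (added comment): this is the direct (non-Winograd) 3x3 stride-1 kernel for
// pack4 input to pack1 output; two output channels share each loaded input row (weight
// operands %10..%18 and %19..%27 in the asm below), and every output value is a 3x3 sum
// of pack-4 dot products that the faddp/fadd tail reduces to a scalar before adding to
// the running sum already stored in the output row (initialised with the bias via fill).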
bias[p + 1] : 0.f; out0.fill(bias0); out1.fill(bias1); const float* k0 = kernel.channel(p); const float* k1 = kernel.channel(p + 1); for (int q = 0; q < inch; q++) { float* outptr0 = out0; float* outptr1 = out1; const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); float32x4_t _k00_0 = vld1q_f32(k0); float32x4_t _k01_0 = vld1q_f32(k0 + 4); float32x4_t _k02_0 = vld1q_f32(k0 + 8); float32x4_t _k10_0 = vld1q_f32(k0 + 12); float32x4_t _k11_0 = vld1q_f32(k0 + 16); float32x4_t _k12_0 = vld1q_f32(k0 + 20); float32x4_t _k20_0 = vld1q_f32(k0 + 24); float32x4_t _k21_0 = vld1q_f32(k0 + 28); float32x4_t _k22_0 = vld1q_f32(k0 + 32); float32x4_t _k00_1 = vld1q_f32(k1); float32x4_t _k01_1 = vld1q_f32(k1 + 4); float32x4_t _k02_1 = vld1q_f32(k1 + 8); float32x4_t _k10_1 = vld1q_f32(k1 + 12); float32x4_t _k11_1 = vld1q_f32(k1 + 16); float32x4_t _k12_1 = vld1q_f32(k1 + 20); float32x4_t _k20_1 = vld1q_f32(k1 + 24); float32x4_t _k21_1 = vld1q_f32(k1 + 28); float32x4_t _k22_1 = vld1q_f32(k1 + 32); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r00 r01 r02 r03 "fmul v16.4s, %10.4s, v0.4s \n" "fmul v17.4s, %19.4s, v0.4s \n" "fmul v18.4s, %10.4s, v1.4s \n" "fmul v19.4s, %19.4s, v1.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v4.4s, v5.4s}, [%2] \n" // r04 r05 "fmul v6.4s, %10.4s, v2.4s \n" "fmul v7.4s, %19.4s, v2.4s \n" "fmul v8.4s, %10.4s, v3.4s \n" "fmul v9.4s, %19.4s, v3.4s \n" "fmla v16.4s, %11.4s, v1.4s \n" "fmla v17.4s, %20.4s, v1.4s \n" "fmla v18.4s, %11.4s, v2.4s \n" "fmla v19.4s, %20.4s, v2.4s \n" "fmla v6.4s, %11.4s, v3.4s \n" "fmla v7.4s, %20.4s, v3.4s \n" "fmla v8.4s, %11.4s, v4.4s \n" "fmla v9.4s, %20.4s, v4.4s \n" "fmla v16.4s, %12.4s, v2.4s \n" "fmla v17.4s, %21.4s, v2.4s \n" "fmla v18.4s, %12.4s, v3.4s \n" "fmla v19.4s, %21.4s, v3.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r10 r11 r12 r12 "fmla v6.4s, %12.4s, v4.4s \n" "fmla v7.4s, %21.4s, v4.4s \n" "fmla v8.4s, %12.4s, v5.4s \n" "fmla v9.4s, %21.4s, v5.4s \n" "fmla v16.4s, %13.4s, v0.4s \n" "fmla v17.4s, %22.4s, v0.4s \n" "fmla v18.4s, %13.4s, v1.4s \n" "fmla v19.4s, %22.4s, v1.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4s, v5.4s}, [%3] \n" // r14 r15 "fmla v6.4s, %13.4s, v2.4s \n" "fmla v7.4s, %22.4s, v2.4s \n" "fmla v8.4s, %13.4s, v3.4s \n" "fmla v9.4s, %22.4s, v3.4s \n" "fmla v16.4s, %14.4s, v1.4s \n" "fmla v17.4s, %23.4s, v1.4s \n" "fmla v18.4s, %14.4s, v2.4s \n" "fmla v19.4s, %23.4s, v2.4s \n" "fmla v6.4s, %14.4s, v3.4s \n" "fmla v7.4s, %23.4s, v3.4s \n" "fmla v8.4s, %14.4s, v4.4s \n" "fmla v9.4s, %23.4s, v4.4s \n" "fmla v16.4s, %15.4s, v2.4s \n" "fmla v17.4s, %24.4s, v2.4s \n" "fmla v18.4s, %15.4s, v3.4s \n" "fmla v19.4s, %24.4s, v3.4s \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%4], #64 \n" // r20 r21 r22 r22 "fmla v6.4s, %15.4s, v4.4s \n" "fmla v7.4s, %24.4s, v4.4s \n" "fmla v8.4s, %15.4s, v5.4s \n" "fmla v9.4s, %24.4s, v5.4s \n" "fmla v16.4s, %16.4s, v0.4s \n" "fmla v17.4s, %25.4s, v0.4s \n" "fmla v18.4s, %16.4s, v1.4s \n" "fmla v19.4s, %25.4s, v1.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4s, v5.4s}, [%4] \n" // r24 r25 "fmla v6.4s, %16.4s, v2.4s \n" "fmla v7.4s, %25.4s, v2.4s \n" "fmla v8.4s, %16.4s, v3.4s \n" "fmla v9.4s, %25.4s, v3.4s \n" "fmla v16.4s, %17.4s, v1.4s \n" "fmla v17.4s, %26.4s, v1.4s \n" "fmla v18.4s, %17.4s, v2.4s \n" "fmla v19.4s, 
%26.4s, v2.4s \n" "fmla v6.4s, %17.4s, v3.4s \n" "fmla v7.4s, %26.4s, v3.4s \n" "fmla v8.4s, %17.4s, v4.4s \n" "fmla v9.4s, %26.4s, v4.4s \n" "fmla v16.4s, %18.4s, v2.4s \n" "fmla v17.4s, %27.4s, v2.4s \n" "fmla v18.4s, %18.4s, v3.4s \n" "fmla v19.4s, %27.4s, v3.4s \n" "fmla v6.4s, %18.4s, v4.4s \n" "fmla v7.4s, %27.4s, v4.4s \n" "fmla v8.4s, %18.4s, v5.4s \n" "fmla v9.4s, %27.4s, v5.4s \n" "ld1 {v0.4s}, [%0] \n" // sum00 sum01 sum02 sum03 "ld1 {v1.4s}, [%1] \n" // sum10 sum11 sum12 sum13 "faddp v16.4s, v16.4s, v16.4s \n" "faddp v17.4s, v17.4s, v17.4s \n" "faddp v18.4s, v18.4s, v18.4s \n" "faddp v19.4s, v19.4s, v19.4s \n" "faddp v6.4s, v6.4s, v6.4s \n" "faddp v7.4s, v7.4s, v7.4s \n" "faddp v8.4s, v8.4s, v8.4s \n" "faddp v9.4s, v9.4s, v9.4s \n" "faddp v16.2s, v16.2s, v18.2s \n" "faddp v17.2s, v17.2s, v19.2s \n" "faddp v6.2s, v6.2s, v8.2s \n" "faddp v7.2s, v7.2s, v9.2s \n" "trn1 v16.2d, v16.2d, v6.2d \n" "trn1 v17.2d, v17.2d, v7.2d \n" "fadd v0.4s, v0.4s, v16.4s \n" "fadd v1.4s, v1.4s, v17.4s \n" "st1 {v0.4s}, [%0], #16 \n" "st1 {v1.4s}, [%1], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "w"(_k00_0), // %10 "w"(_k01_0), // %11 "w"(_k02_0), // %12 "w"(_k10_0), // %13 "w"(_k11_0), // %14 "w"(_k12_0), // %15 "w"(_k20_0), // %16 "w"(_k21_0), // %17 "w"(_k22_0), // %18 "w"(_k00_1), // %19 "w"(_k01_1), // %20 "w"(_k02_1), // %21 "w"(_k10_1), // %22 "w"(_k11_1), // %23 "w"(_k12_1), // %24 "w"(_k20_1), // %25 "w"(_k21_1), // %26 "w"(_k22_1) // %27 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2] \n" // r00 r01 r02 r03 "fmul v16.4s, %10.4s, v0.4s \n" "fmul v17.4s, %19.4s, v0.4s \n" "fmul v18.4s, %10.4s, v1.4s \n" "fmul v19.4s, %19.4s, v1.4s \n" "fmla v16.4s, %11.4s, v1.4s \n" "fmla v17.4s, %20.4s, v1.4s \n" "fmla v18.4s, %11.4s, v2.4s \n" "fmla v19.4s, %20.4s, v2.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3] \n" // r10 r11 r12 r12 "fmla v16.4s, %12.4s, v2.4s \n" "fmla v17.4s, %21.4s, v2.4s \n" "fmla v18.4s, %12.4s, v3.4s \n" "fmla v19.4s, %21.4s, v3.4s \n" "fmla v16.4s, %13.4s, v4.4s \n" "fmla v17.4s, %22.4s, v4.4s \n" "fmla v18.4s, %13.4s, v5.4s \n" "fmla v19.4s, %22.4s, v5.4s \n" "fmla v16.4s, %14.4s, v5.4s \n" "fmla v17.4s, %23.4s, v5.4s \n" "fmla v18.4s, %14.4s, v6.4s \n" "fmla v19.4s, %23.4s, v6.4s \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%4] \n" // r20 r21 r22 r22 "fmla v16.4s, %15.4s, v6.4s \n" "fmla v17.4s, %24.4s, v6.4s \n" "fmla v18.4s, %15.4s, v7.4s \n" "fmla v19.4s, %24.4s, v7.4s \n" "fmla v16.4s, %16.4s, v0.4s \n" "fmla v17.4s, %25.4s, v0.4s \n" "fmla v18.4s, %16.4s, v1.4s \n" "fmla v19.4s, %25.4s, v1.4s \n" "fmla v16.4s, %17.4s, v1.4s \n" "fmla v17.4s, %26.4s, v1.4s \n" "fmla v18.4s, %17.4s, v2.4s \n" "fmla v19.4s, %26.4s, v2.4s \n" "fmla v16.4s, %18.4s, v2.4s \n" "fmla v17.4s, %27.4s, v2.4s \n" "fmla v18.4s, %18.4s, v3.4s \n" "fmla v19.4s, %27.4s, v3.4s \n" "ld1 {v4.2s}, [%0] \n" // sum00 sum01 "ld1 {v5.2s}, [%1] \n" // sum10 sum11 "faddp v16.4s, v16.4s, v16.4s \n" "faddp v17.4s, v17.4s, v17.4s \n" "faddp v18.4s, v18.4s, v18.4s \n" "faddp v19.4s, v19.4s, v19.4s \n" "add %2, %2, #32 \n" "faddp v16.2s, v16.2s, v18.2s \n" "faddp v17.2s, v17.2s, v19.2s \n" "add %3, %3, #32 \n" "fadd v4.2s, v4.2s, v16.2s \n" "fadd v5.2s, v5.2s, v17.2s \n" "add %4, %4, 
#32 \n" "st1 {v4.2s}, [%0], #8 \n" "st1 {v5.2s}, [%1], #8 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "w"(_k00_0), // %10 "w"(_k01_0), // %11 "w"(_k02_0), // %12 "w"(_k10_0), // %13 "w"(_k11_0), // %14 "w"(_k12_0), // %15 "w"(_k20_0), // %16 "w"(_k21_0), // %17 "w"(_k22_0), // %18 "w"(_k00_1), // %19 "w"(_k01_1), // %20 "w"(_k02_1), // %21 "w"(_k10_1), // %22 "w"(_k11_1), // %23 "w"(_k12_1), // %24 "w"(_k20_1), // %25 "w"(_k21_1), // %26 "w"(_k22_1) // %27 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%2, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%2] \n" // r00 r01 r02 "fmul v16.4s, %10.4s, v0.4s \n" "fmul v17.4s, %19.4s, v0.4s \n" "fmul v18.4s, %11.4s, v1.4s \n" "fmul v19.4s, %20.4s, v1.4s \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v3.4s, v4.4s, v5.4s}, [%3] \n" // r10 r11 r12 "fmla v16.4s, %12.4s, v2.4s \n" "fmla v17.4s, %21.4s, v2.4s \n" "fmla v18.4s, %13.4s, v3.4s \n" "fmla v19.4s, %22.4s, v3.4s \n" "fmla v16.4s, %14.4s, v4.4s \n" "fmla v17.4s, %23.4s, v4.4s \n" "prfm pldl1keep, [%4, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%4] \n" // r20 r21 r22 "fmla v18.4s, %15.4s, v5.4s \n" "fmla v19.4s, %24.4s, v5.4s \n" "fmla v16.4s, %16.4s, v0.4s \n" "fmla v17.4s, %25.4s, v0.4s \n" "fmla v18.4s, %17.4s, v1.4s \n" "fmla v19.4s, %26.4s, v1.4s \n" "fmla v16.4s, %18.4s, v2.4s \n" "fmla v17.4s, %27.4s, v2.4s \n" "ld1 {v3.s}[0], [%0] \n" // sum00 "ld1 {v4.s}[0], [%1] \n" // sum10 "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "add %2, %2, #16 \n" "faddp v16.4s, v16.4s, v16.4s \n" "faddp v17.4s, v17.4s, v17.4s \n" "add %3, %3, #16 \n" "faddp v16.2s, v16.2s, v16.2s \n" "faddp v17.2s, v17.2s, v17.2s \n" "add %4, %4, #16 \n" "fadd v3.2s, v3.2s, v16.2s \n" "fadd v4.2s, v4.2s, v17.2s \n" "st1 {v3.s}[0], [%0], #4 \n" "st1 {v4.s}[0], [%1], #4 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "w"(_k00_0), // %10 "w"(_k01_0), // %11 "w"(_k02_0), // %12 "w"(_k10_0), // %13 "w"(_k11_0), // %14 "w"(_k12_0), // %15 "w"(_k20_0), // %16 "w"(_k21_0), // %17 "w"(_k22_0), // %18 "w"(_k00_1), // %19 "w"(_k01_1), // %20 "w"(_k02_1), // %21 "w"(_k10_1), // %22 "w"(_k11_1), // %23 "w"(_k12_1), // %24 "w"(_k20_1), // %25 "w"(_k21_1), // %26 "w"(_k22_1) // %27 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18", "v19"); } r0 += 2 * 4; r1 += 2 * 4; r2 += 2 * 4; } k0 += 9 * 4; k1 += 9 * 4; } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0 = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out0.fill(bias0); const float* k0 = kernel.channel(p); for (int q = 0; q < inch; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k10 = vld1q_f32(k0 + 12); float32x4_t _k11 = vld1q_f32(k0 + 16); float32x4_t _k12 = vld1q_f32(k0 + 20); float32x4_t _k20 = vld1q_f32(k0 + 24); float32x4_t _k21 = vld1q_f32(k0 + 28); float32x4_t _k22 = vld1q_f32(k0 + 32); int i = 0; for (; i < outh; i++) { int j = 0; #if __aarch64__ for (; j + 7 < outw; j += 8) { asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" // r04 r05 r06 r07 "fmul v16.4s, %8.4s, v0.4s \n" "fmul v17.4s, %8.4s, v1.4s \n" "fmul v18.4s, %8.4s, v2.4s \n" "fmul v19.4s, %8.4s, v3.4s \n" "fmul v20.4s, %8.4s, v4.4s \n" "fmul v21.4s, %8.4s, v5.4s \n" "fmul v22.4s, %8.4s, v6.4s \n" "fmul v23.4s, %8.4s, v7.4s \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" // r08 r09 "fmla v16.4s, %9.4s, v1.4s \n" "fmla v17.4s, %9.4s, v2.4s \n" "fmla v18.4s, %9.4s, v3.4s \n" "fmla v19.4s, %9.4s, v4.4s \n" "fmla v20.4s, %9.4s, v5.4s \n" "fmla v21.4s, %9.4s, v6.4s \n" "fmla v22.4s, %9.4s, v7.4s \n" "fmla v23.4s, %9.4s, v8.4s \n" "fmla v16.4s, %10.4s, v2.4s \n" "fmla v17.4s, %10.4s, v3.4s \n" "fmla v18.4s, %10.4s, v4.4s \n" "fmla v19.4s, %10.4s, v5.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v20.4s, %10.4s, v6.4s \n" "fmla v21.4s, %10.4s, v7.4s \n" "fmla v22.4s, %10.4s, v8.4s \n" "fmla v23.4s, %10.4s, v9.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r14 r15 r16 r17 "fmla v16.4s, %11.4s, v0.4s \n" "fmla v17.4s, %11.4s, v1.4s \n" "fmla v18.4s, %11.4s, v2.4s \n" "fmla v19.4s, %11.4s, v3.4s \n" "fmla v20.4s, %11.4s, v4.4s \n" "fmla v21.4s, %11.4s, v5.4s \n" "fmla v22.4s, %11.4s, v6.4s \n" "fmla v23.4s, %11.4s, v7.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v8.4s, v9.4s}, [%2] \n" // r18 r19 "fmla v16.4s, %12.4s, v1.4s \n" "fmla v17.4s, %12.4s, v2.4s \n" "fmla v18.4s, %12.4s, v3.4s \n" "fmla v19.4s, %12.4s, v4.4s \n" "fmla v20.4s, %12.4s, v5.4s \n" "fmla v21.4s, %12.4s, v6.4s \n" "fmla v22.4s, %12.4s, v7.4s \n" "fmla v23.4s, %12.4s, v8.4s \n" "fmla v16.4s, %13.4s, v2.4s \n" "fmla v17.4s, %13.4s, v3.4s \n" "fmla v18.4s, %13.4s, v4.4s \n" "fmla v19.4s, %13.4s, v5.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v20.4s, %13.4s, v6.4s \n" "fmla v21.4s, %13.4s, v7.4s \n" "fmla v22.4s, %13.4s, v8.4s \n" "fmla v23.4s, %13.4s, v9.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r24 r25 r26 r27 "fmla v16.4s, %14.4s, v0.4s \n" "fmla v17.4s, %14.4s, v1.4s \n" "fmla v18.4s, %14.4s, v2.4s \n" "fmla v19.4s, %14.4s, v3.4s \n" "fmla v20.4s, %14.4s, v4.4s \n" "fmla v21.4s, %14.4s, v5.4s \n" "fmla v22.4s, %14.4s, v6.4s \n" "fmla v23.4s, %14.4s, v7.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4s, v9.4s}, [%3] \n" // r28 r29 "fmla v16.4s, %15.4s, v1.4s \n" "fmla v17.4s, %15.4s, v2.4s \n" "fmla v18.4s, %15.4s, v3.4s \n" "fmla v19.4s, %15.4s, v4.4s \n" "fmla v20.4s, %15.4s, v5.4s \n" "fmla v21.4s, %15.4s, v6.4s \n" "fmla v22.4s, %15.4s, v7.4s \n" "fmla v23.4s, 
%15.4s, v8.4s \n" "fmla v16.4s, %16.4s, v2.4s \n" "fmla v17.4s, %16.4s, v3.4s \n" "fmla v18.4s, %16.4s, v4.4s \n" "fmla v19.4s, %16.4s, v5.4s \n" "fmla v20.4s, %16.4s, v6.4s \n" "fmla v21.4s, %16.4s, v7.4s \n" "fmla v22.4s, %16.4s, v8.4s \n" "fmla v23.4s, %16.4s, v9.4s \n" "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" // sum0 sum1 sum2 sum3 sum4 sum5 sum6 sum7 "faddp v16.4s, v16.4s, v17.4s \n" "faddp v18.4s, v18.4s, v19.4s \n" "faddp v20.4s, v20.4s, v21.4s \n" "faddp v22.4s, v22.4s, v23.4s \n" "faddp v16.4s, v16.4s, v18.4s \n" "faddp v20.4s, v20.4s, v22.4s \n" "fadd v0.4s, v0.4s, v16.4s \n" "fadd v1.4s, v1.4s, v20.4s \n" "st1 {v0.4s, v1.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } #endif // __aarch64__ for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%1, #256] \n" "ld1 {v8.4s, v9.4s}, [%1] \n" // r04 r05 "fmul v16.4s, %8.4s, v0.4s \n" "fmul v17.4s, %8.4s, v1.4s \n" "fmul v18.4s, %8.4s, v2.4s \n" "fmul v19.4s, %8.4s, v3.4s \n" "fmla v16.4s, %9.4s, v1.4s \n" "fmla v17.4s, %9.4s, v2.4s \n" "fmla v18.4s, %9.4s, v3.4s \n" "fmla v19.4s, %9.4s, v8.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v16.4s, %10.4s, v2.4s \n" "fmla v17.4s, %10.4s, v3.4s \n" "fmla v18.4s, %10.4s, v8.4s \n" "fmla v19.4s, %10.4s, v9.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v8.4s, v9.4s}, [%2] \n" // r14 r15 "fmla v16.4s, %11.4s, v4.4s \n" "fmla v17.4s, %11.4s, v5.4s \n" "fmla v18.4s, %11.4s, v6.4s \n" "fmla v19.4s, %11.4s, v7.4s \n" "fmla v16.4s, %12.4s, v5.4s \n" "fmla v17.4s, %12.4s, v6.4s \n" "fmla v18.4s, %12.4s, v7.4s \n" "fmla v19.4s, %12.4s, v8.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v16.4s, %13.4s, v6.4s \n" "fmla v17.4s, %13.4s, v7.4s \n" "fmla v18.4s, %13.4s, v8.4s \n" "fmla v19.4s, %13.4s, v9.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4s, v9.4s}, [%3] \n" // r24 r25 "fmla v16.4s, %14.4s, v0.4s \n" "fmla v17.4s, %14.4s, v1.4s \n" "fmla v18.4s, %14.4s, v2.4s \n" "fmla v19.4s, %14.4s, v3.4s \n" "fmla v16.4s, %15.4s, v1.4s \n" "fmla v17.4s, %15.4s, v2.4s \n" "fmla v18.4s, %15.4s, v3.4s \n" "fmla v19.4s, %15.4s, v8.4s \n" "fmla v16.4s, %16.4s, v2.4s \n" "fmla v17.4s, %16.4s, v3.4s \n" "fmla v18.4s, %16.4s, v8.4s \n" "fmla v19.4s, %16.4s, v9.4s \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" // sum0 sum1 sum2 sum3 "faddp v16.4s, v16.4s, v17.4s \n" "faddp v18.4s, v18.4s, v19.4s \n" "faddp v16.4s, v16.4s, v18.4s \n" "fadd v0.4s, v0.4s, v16.4s \n" "st1 {v0.4s}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v16", "v17", "v18", "v19"); #else // __aarch64__ asm volatile( "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! 
\n" // r00 r01 "vmul.f32 q3, %q8, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d4-d5}, [%1 :128]! \n" // r02 "vmul.f32 q4, %q8, q1 \n" "vmla.f32 q3, %q9, q1 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" // r03 r04 "vmul.f32 q5, %q8, q2 \n" "vmla.f32 q4, %q9, q2 \n" "vmla.f32 q3, %q10, q2 \n" "vmul.f32 q6, %q8, q0 \n" "vmla.f32 q5, %q9, q0 \n" "vmla.f32 q4, %q10, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d4-d5}, [%1 :128] \n" // r05 "vmla.f32 q6, %q9, q1 \n" "vmla.f32 q5, %q10, q1 \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" // r10 r11 "vmla.f32 q6, %q10, q2 \n" "vmla.f32 q3, %q11, q0 \n" "pld [%2, #128] \n" "vld1.f32 {d4-d5}, [%2 :128]! \n" // r12 "vmla.f32 q4, %q11, q1 \n" "vmla.f32 q3, %q12, q1 \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" // r13 r14 "vmla.f32 q5, %q11, q2 \n" "vmla.f32 q4, %q12, q2 \n" "vmla.f32 q3, %q13, q2 \n" "vmla.f32 q6, %q11, q0 \n" "vmla.f32 q5, %q12, q0 \n" "vmla.f32 q4, %q13, q0 \n" "pld [%2, #128] \n" "vld1.f32 {d4-d5}, [%2 :128] \n" // r15 "vmla.f32 q6, %q12, q1 \n" "vmla.f32 q5, %q13, q1 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" // r20 r21 "vmla.f32 q6, %q13, q2 \n" "vmla.f32 q3, %q14, q0 \n" "pld [%3, #128] \n" "vld1.f32 {d4-d5}, [%3 :128]! \n" // r22 "vmla.f32 q4, %q14, q1 \n" "vmla.f32 q3, %q15, q1 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" // r23 r24 "vmla.f32 q5, %q14, q2 \n" "vmla.f32 q4, %q15, q2 \n" "vmla.f32 q3, %q16, q2 \n" "vmla.f32 q6, %q14, q0 \n" "vmla.f32 q5, %q15, q0 \n" "vmla.f32 q4, %q16, q0 \n" "pld [%3, #128] \n" "vld1.f32 {d4-d5}, [%3 :128] \n" // r25 "vmla.f32 q6, %q15, q1 \n" "vmla.f32 q5, %q16, q1 \n" "vld1.f32 {d0-d1}, [%0] \n" // sum0 sum1 sum2 sum3 "vmla.f32 q6, %q16, q2 \n" "vadd.f32 d6, d6, d7 \n" "vadd.f32 d8, d8, d9 \n" "vadd.f32 d10, d10, d11 \n" "vadd.f32 d12, d12, d13 \n" "sub %1, %1, #16 \n" "vpadd.f32 d6, d6, d8 \n" "vpadd.f32 d7, d10, d12 \n" "sub %2, %2, #16 \n" "vadd.f32 q0, q0, q3 \n" "sub %3, %3, #16 \n" "vst1.f32 {d0-d1}, [%0]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6"); #endif // __aarch64__ } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1] \n" // r00 r01 r02 r03 "fmul v16.4s, %8.4s, v0.4s \n" "fmul v17.4s, %8.4s, v1.4s \n" "fmul v18.4s, %9.4s, v1.4s \n" "fmul v19.4s, %9.4s, v2.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2] \n" // r10 r11 r12 r13 "fmla v16.4s, %10.4s, v2.4s \n" "fmla v17.4s, %10.4s, v3.4s \n" "fmla v18.4s, %11.4s, v4.4s \n" "fmla v19.4s, %11.4s, v5.4s \n" "fmla v16.4s, %12.4s, v5.4s \n" "fmla v17.4s, %12.4s, v6.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3] \n" // r20 r21 r22 r23 "fmla v18.4s, %13.4s, v6.4s \n" "fmla v19.4s, %13.4s, v7.4s \n" "fmla v16.4s, %14.4s, v0.4s \n" "fmla v17.4s, %14.4s, v1.4s \n" "fmla v18.4s, %15.4s, v1.4s \n" "fmla v19.4s, %15.4s, v2.4s \n" "fmla v16.4s, %16.4s, v2.4s \n" "fmla v17.4s, %16.4s, v3.4s \n" "ld1 {v0.2s}, [%0] \n" // sum0 sum1 "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "add %1, %1, #32 \n" "faddp v16.4s, v16.4s, v17.4s \n" "add %2, %2, #32 \n" "faddp v16.4s, v16.4s, v16.4s \n" "add %3, %3, #32 \n" "fadd v0.2s, v0.2s, v16.2s \n" "st1 {v0.2s}, [%0], #8 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19"); #else // __aarch64__ asm volatile( "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128]! \n" // r00 r01 "vmul.f32 q5, %q8, q0 \n" "vmul.f32 q6, %q8, q1 \n" "vmul.f32 q2, %q9, q1 \n" "pld [%1, #256] \n" "vld1.f32 {d0-d3}, [%1 :128] \n" // r02 r03 "vmul.f32 q3, %q9, q0 \n" "vmla.f32 q5, %q10, q0 \n" "vmla.f32 q6, %q10, q1 \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! \n" // r10 r11 "vmla.f32 q2, %q11, q0 \n" "vmla.f32 q3, %q11, q1 \n" "vmla.f32 q5, %q12, q1 \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128] \n" // r12 r13 "vmla.f32 q6, %q12, q0 \n" "vmla.f32 q2, %q13, q0 \n" "vmla.f32 q3, %q13, q1 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128]! \n" // r20 r21 "vmla.f32 q5, %q14, q0 \n" "vmla.f32 q6, %q14, q1 \n" "vmla.f32 q2, %q15, q1 \n" "pld [%3, #256] \n" "vld1.f32 {d0-d3}, [%3 :128] \n" // r22 r23 "vmla.f32 q3, %q15, q0 \n" "vmla.f32 q5, %q16, q0 \n" "vmla.f32 q6, %q16, q1 \n" "vld1.f32 {d8}, [%0] \n" // sum0 sum1 "vadd.f32 q5, q5, q2 \n" "vadd.f32 q6, q6, q3 \n" "vadd.f32 d10, d10, d11 \n" "vadd.f32 d12, d12, d13 \n" "vpadd.f32 d10, d10, d12 \n" "vadd.f32 d8, d8, d10 \n" "vst1.f32 {d8}, [%0]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6"); #endif // __aarch64__ } for (; j < outw; j++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%1] \n" // r00 r01 r02 "eor v16.16b, v16.16b, v16.16b \n" "ld1 {v16.s}[0], [%0] \n" // sum0 "fmul v17.4s, %8.4s, v0.4s \n" "fmul v18.4s, %9.4s, v1.4s \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v3.4s, v4.4s, v5.4s}, [%2] \n" // r10 r11 r12 "fmla v16.4s, %10.4s, v2.4s \n" "fmla v17.4s, %11.4s, v3.4s \n" "fmla v18.4s, %12.4s, v4.4s \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v0.4s, v1.4s, v2.4s}, [%3] \n" // r20 r21 r22 "fmla v16.4s, %13.4s, v5.4s \n" "fmla v17.4s, %14.4s, v0.4s \n" "fmla v18.4s, %15.4s, v1.4s \n" "fmla v16.4s, %16.4s, v2.4s \n" "fadd v17.4s, v17.4s, v18.4s \n" "fadd v16.4s, v16.4s, v17.4s \n" "add %1, %1, #16 \n" "faddp v16.4s, v16.4s, v16.4s \n" "add %2, %2, #16 \n" "faddp v16.2s, v16.2s, v16.2s \n" "add %3, %3, #16 \n" "st1 {v16.s}[0], [%0], #4 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v16", "v17", "v18"); #else // __aarch64__ asm volatile( "pld [%1, #384] \n" "vldm %1, {d0-d5} \n" // r00 r01 r02 "veor q3, q3 \n" "vld1.f32 {d6[0]}, [%0] \n" // sum0 "vmul.f32 q4, %q8, q0 \n" "vmul.f32 q5, %q9, q1 \n" "vmla.f32 q3, %q10, q2 \n" "pld [%2, #384] \n" "vldm %2, {d0-d5} \n" // r10 r11 r12 "vmla.f32 q4, %q11, q0 \n" "vmla.f32 q5, %q12, q1 \n" "vmla.f32 q3, %q13, q2 \n" "pld [%3, #384] \n" "vldm %3, {d0-d5} \n" // r20 r21 r22 "vmla.f32 q4, %q14, q0 \n" "vmla.f32 q5, %q15, q1 \n" "vmla.f32 q3, %q16, q2 \n" "vadd.f32 q4, q4, q5 \n" "vadd.f32 q3, q3, q4 \n" "add %1, %1, #16 \n" "vadd.f32 d6, d6, d7 \n" "add %2, %2, #16 \n" "vpadd.f32 d6, d6, d6 \n" "add %3, %3, #16 \n" "vst1.f32 {d6[0]}, [%0]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "q0", "q1", "q2", "q3", "q4", "q5"); #endif // __aarch64__ } r0 += 2 * 4; r1 += 2 * 4; r2 += 2 * 4; } k0 += 9 * 4; } } }
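For orientation, here is a minimal plain-C sketch (not part of the file above, and not ncnn's API) of what the NEON kernels compute in the 3x3, stride-1, pack4-to-1 case: each output value is a 9-tap window sum in which every tap is a 4-lane dot product between a packed-4 input pixel and the corresponding packed-4 kernel weights, which is what the fmla/faddp chains above vectorize. The raw-array layouts and the function and parameter names below are assumptions inferred from the asm comments, not the library's data structures.

/* Illustrative reference only -- layouts are assumed, not ncnn's Mat API.
 * bottom: inch4 channel groups, each h*w pixels stored as 4 consecutive floats (pack4)
 * kernel: per output channel p, inch4 * 9 taps of 4 packed weights
 * top:    outch planes of outh*outw single floats
 * For this stride-1 3x3 kernel, w == outw + 2 and h == outh + 2.
 */
static void conv3x3s1_pack4to1_ref(const float *bottom, int w, int h, int inch4,
                                   const float *kernel, const float *bias,
                                   float *top, int outw, int outh, int outch)
{
    for (int p = 0; p < outch; p++)
    {
        const float *kp = kernel + (size_t)p * inch4 * 9 * 4;   /* weights for output channel p */
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float sum = bias ? bias[p] : 0.f;
                for (int q = 0; q < inch4; q++)
                {
                    const float *img = bottom + (size_t)q * w * h * 4;
                    const float *k0 = kp + q * 9 * 4;
                    for (int ky = 0; ky < 3; ky++)
                        for (int kx = 0; kx < 3; kx++)
                        {
                            const float *px = img + ((size_t)(i + ky) * w + (j + kx)) * 4;
                            const float *kk = k0 + (ky * 3 + kx) * 4;
                            /* the faddp reductions above perform this 4-lane dot product */
                            sum += px[0] * kk[0] + px[1] * kk[1]
                                 + px[2] * kk[2] + px[3] * kk[3];
                        }
                }
                top[(size_t)p * outh * outw + (size_t)i * outw + j] = sum;
            }
        }
    }
}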
15.c
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>

int main()
{
    float sum, serialsum;
    int n, i, NoOfthreads;

    printf("Enter number of threads: ");
    scanf("%d", &NoOfthreads);
    printf("Enter array size: ");
    scanf("%d", &n);

    if ((NoOfthreads != 1) && (NoOfthreads != 2) && (NoOfthreads != 4) &&
        (NoOfthreads != 8) && (NoOfthreads != 16)) {
        printf("\n Number of threads should be 1, 2, 4, 8 or 16 for the execution of this program.\n\n");
        exit(-1);
    }

    /* Initialise the array and keep a copy for the serial check. */
    float array[n], check[n];
    for (i = 0; i < n; i++) {
        array[i] = i + 5;
        check[i] = array[i];
    }

    /* Parallel sum of the array using an OpenMP reduction. */
    sum = 0.0;
    omp_set_num_threads(NoOfthreads);
#pragma omp parallel for reduction(+ : sum)
    for (i = 0; i < n; i++)
        sum = sum + array[i];

    /* Serial reference sum over the saved copy. */
    serialsum = 0.0;
    for (i = 0; i < n; i++)
        serialsum = serialsum + check[i];

    /* Note: exact float comparison may report a mismatch for large n,
       because the parallel reduction reorders the additions. */
    if (serialsum != sum) {
        printf("\n\n\t\t The parallel calculation of the array sum differs from the serial calculation \n");
        exit(-1);
    }
    else
        printf("\n\n\t\t The parallel calculation of the array sum matches the serial calculation \n");

    printf("\n\t\t The sum of the array elements using OpenMP directives is %f\n", sum);
    printf("\t\t The sum of the array elements by serial calculation is %f\n\n", serialsum);

    return 0;
}
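For reference, a minimal sketch (not part of 15.c) of what the reduction(+ : sum) clause above arranges conceptually: each thread accumulates into its own private partial sum and the partials are combined once at the end, so the loop body stays free of data races; the OpenMP runtime's actual combining strategy may differ. The function name below is illustrative. Either version should be built with an OpenMP-enabled compiler, e.g. gcc -fopenmp 15.c.

#include <omp.h>

/* Illustrative manual equivalent of "#pragma omp parallel for reduction(+ : sum)". */
float sum_with_manual_reduction(const float *array, int n)
{
    float sum = 0.0f;
#pragma omp parallel
    {
        float partial = 0.0f;             /* private per-thread accumulator */
#pragma omp for
        for (int i = 0; i < n; i++)
            partial += array[i];
#pragma omp critical                      /* serialize only the final combine */
        sum += partial;
    }
    return sum;
}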
cache.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distribute-cache-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const Quantum *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static const void *GetVirtualMetacontentFromCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *, ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,Quantum *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), OpenPixelCacheOnDisk(CacheInfo *,const MapMode), ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), ReadPixelCacheMetacontent(CacheInfo *magick_restrict, NexusInfo *magick_restrict,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *); static Quantum *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode, const ssize_t,const ssize_t,const size_t,const size_t, const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *) magick_hot_spot; #if defined(MAGICKCORE_OPENCL_SUPPORT) static void CopyOpenCLBuffer(CacheInfo *magick_restrict); #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. */ static SemaphoreInfo *cache_semaphore = (SemaphoreInfo *) NULL; static ssize_t cache_anonymous_memory = (-1); static time_t cache_epoch = 0; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCache() acquires a pixel cache. % % The format of the AcquirePixelCache() method is: % % Cache AcquirePixelCache(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. 
% */ MagickPrivate Cache AcquirePixelCache(const size_t number_threads) { CacheInfo *magick_restrict cache_info; char *value; cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info)); if (cache_info == (CacheInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(cache_info,0,sizeof(*cache_info)); cache_info->type=UndefinedCache; cache_info->mode=IOMode; cache_info->disk_mode=IOMode; cache_info->colorspace=sRGBColorspace; cache_info->file=(-1); cache_info->id=GetMagickThreadId(); cache_info->number_threads=number_threads; if (GetOpenMPMaximumThreads() > cache_info->number_threads) cache_info->number_threads=GetOpenMPMaximumThreads(); if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads) cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource); if (cache_info->number_threads == 0) cache_info->number_threads=1; cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads); if (cache_info->nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); value=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (value != (const char *) NULL) { cache_info->synchronize=IsStringTrue(value); value=DestroyString(value); } value=GetPolicyValue("cache:synchronize"); if (value != (const char *) NULL) { cache_info->synchronize=IsStringTrue(value); value=DestroyString(value); } cache_info->width_limit=GetMagickResourceLimit(WidthResource); cache_info->height_limit=GetMagickResourceLimit(HeightResource); cache_info->semaphore=AcquireSemaphoreInfo(); cache_info->reference_count=1; cache_info->file_semaphore=AcquireSemaphoreInfo(); cache_info->debug=IsEventLogging(); cache_info->signature=MagickCoreSignature; return((Cache ) cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCacheNexus() allocates the NexusInfo structure. % % The format of the AcquirePixelCacheNexus method is: % % NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. % */ MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) { NexusInfo **magick_restrict nexus_info; ssize_t i; nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2* number_threads,sizeof(*nexus_info))); if (nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); *nexus_info=(NexusInfo *) AcquireQuantumMemory(2*number_threads, sizeof(**nexus_info)); if (*nexus_info == (NexusInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info)); for (i=0; i < (ssize_t) (2*number_threads); i++) { nexus_info[i]=(*nexus_info+i); if (i < (ssize_t) number_threads) nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i); nexus_info[i]->signature=MagickCoreSignature; } return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCachePixels() returns the pixels associated with the specified % image. 
% % The format of the AcquirePixelCachePixels() method is: % % void *AcquirePixelCachePixels(const Image *image,size_t *length, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. % */ MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); (void) exception; cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=0; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); *length=(size_t) cache_info->length; return(cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentGenesis() instantiates the cache component. % % The format of the CacheComponentGenesis method is: % % MagickBooleanType CacheComponentGenesis(void) % */ MagickPrivate MagickBooleanType CacheComponentGenesis(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) cache_semaphore=AcquireSemaphoreInfo(); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t T e r m i n u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentTerminus() destroys the cache component. % % The format of the CacheComponentTerminus() method is: % % CacheComponentTerminus(void) % */ MagickPrivate void CacheComponentTerminus(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&cache_semaphore); /* no op-- nothing to destroy */ RelinquishSemaphoreInfo(&cache_semaphore); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l i p P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClipPixelCacheNexus() clips the cache nexus as defined by the image clip % mask. The method returns MagickTrue if the pixel region is clipped, % otherwise MagickFalse. % % The format of the ClipPixelCacheNexus() method is: % % MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to clip. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ClipPixelCacheNexus(Image *image, NexusInfo *nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; Quantum *magick_restrict p, *magick_restrict q; ssize_t y; /* Apply clip mask. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->channels & WriteMaskChannel) == 0) return(MagickTrue); if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0)) return(MagickTrue); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return(MagickFalse); p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y, nexus_info->region.width,nexus_info->region.height, nexus_info->virtual_nexus,exception); q=nexus_info->pixels; if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickFalse); for (y=0; y < (ssize_t) nexus_info->region.height; y++) { ssize_t x; for (x=0; x < (ssize_t) nexus_info->region.width; x++) { double mask_alpha; ssize_t i; mask_alpha=QuantumScale*GetPixelWriteMask(image,p); if (fabs(mask_alpha) >= MagickEpsilon) { for (i=0; i < (ssize_t) image->number_channels; i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha* GetPixelAlpha(image,p),(double) q[i],(double) GetPixelAlpha(image,q))); } SetPixelAlpha(image,GetPixelAlpha(image,p),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(image); } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCache() clones a pixel cache. % % The format of the ClonePixelCache() method is: % % Cache ClonePixelCache(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickPrivate Cache ClonePixelCache(const Cache cache) { CacheInfo *magick_restrict clone_info; const CacheInfo *magick_restrict cache_info; assert(cache != NULL); cache_info=(const CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads); clone_info->virtual_pixel_method=cache_info->virtual_pixel_method; return((Cache ) clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheMethods() clones the pixel cache methods from one cache to % another. % % The format of the ClonePixelCacheMethods() method is: % % void ClonePixelCacheMethods(Cache clone,const Cache cache) % % A description of each parameter follows: % % o clone: Specifies a pointer to a Cache structure. % % o cache: the pixel cache. 
% */ MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache) { CacheInfo *magick_restrict cache_info, *magick_restrict source_info; assert(clone != (Cache) NULL); source_info=(CacheInfo *) clone; assert(source_info->signature == MagickCoreSignature); if (source_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", source_info->filename); assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); source_info->methods=cache_info->methods; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e R e p o s i t o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheRepository() clones the source pixel cache to the destination % cache. % % The format of the ClonePixelCacheRepository() method is: % % MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info, % CacheInfo *source_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o source_info: the source pixel cache. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ClonePixelCacheOnDisk( CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info) { MagickSizeType extent; size_t quantum; ssize_t count; struct stat file_stats; unsigned char *buffer; /* Clone pixel cache on disk with identical morphology. */ if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) || (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse)) return(MagickFalse); if ((lseek(cache_info->file,0,SEEK_SET) < 0) || (lseek(clone_info->file,0,SEEK_SET) < 0)) return(MagickFalse); quantum=(size_t) MagickMaxBufferExtent; if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0)) { #if defined(MAGICKCORE_HAVE_LINUX_SENDFILE) if (cache_info->length < 0x7ffff000) { count=sendfile(clone_info->file,cache_info->file,(off_t *) NULL, (size_t) cache_info->length); if (count == (ssize_t) cache_info->length) return(MagickTrue); if ((lseek(cache_info->file,0,SEEK_SET) < 0) || (lseek(clone_info->file,0,SEEK_SET) < 0)) return(MagickFalse); } #endif quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent); } buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer)); if (buffer == (unsigned char *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); extent=0; while ((count=read(cache_info->file,buffer,quantum)) > 0) { ssize_t number_bytes; number_bytes=write(clone_info->file,buffer,(size_t) count); if (number_bytes != count) break; extent+=number_bytes; } buffer=(unsigned char *) RelinquishMagickMemory(buffer); if (extent != cache_info->length) return(MagickFalse); return(MagickTrue); } static MagickBooleanType ClonePixelCacheRepository( CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info, ExceptionInfo *exception) { #define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource)) #define cache_number_threads(source,destination,chunk,multithreaded) \ num_threads((multithreaded) == 0 ? 1 : \ (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \ (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? 
\ MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \ MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1)) MagickBooleanType optimize, status; NexusInfo **magick_restrict cache_nexus, **magick_restrict clone_nexus; size_t length; ssize_t y; assert(cache_info != (CacheInfo *) NULL); assert(clone_info != (CacheInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); if (cache_info->type == PingCache) return(MagickTrue); length=cache_info->number_channels*sizeof(*cache_info->channel_map); if ((cache_info->storage_class == clone_info->storage_class) && (cache_info->colorspace == clone_info->colorspace) && (cache_info->alpha_trait == clone_info->alpha_trait) && (cache_info->channels == clone_info->channels) && (cache_info->columns == clone_info->columns) && (cache_info->rows == clone_info->rows) && (cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) && (cache_info->metacontent_extent == clone_info->metacontent_extent)) { /* Identical pixel cache morphology. */ if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && ((clone_info->type == MemoryCache) || (clone_info->type == MapCache))) { (void) memcpy(clone_info->pixels,cache_info->pixels, cache_info->number_channels*cache_info->columns*cache_info->rows* sizeof(*cache_info->pixels)); if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) (void) memcpy(clone_info->metacontent,cache_info->metacontent, cache_info->columns*cache_info->rows* clone_info->metacontent_extent*sizeof(unsigned char)); return(MagickTrue); } if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache)) return(ClonePixelCacheOnDisk(cache_info,clone_info)); } /* Mismatched pixel cache morphology. */ cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads); clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads); length=cache_info->number_channels*sizeof(*cache_info->channel_map); optimize=(cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ? MagickTrue : MagickFalse; length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns, clone_info->number_channels*clone_info->columns); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ cache_number_threads(cache_info,clone_info,cache_info->rows,1) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; ssize_t x; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y, cache_info->columns,1,MagickFalse,cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y, clone_info->columns,1,MagickFalse,clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length); if (optimize != MagickFalse) (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length* sizeof(Quantum)); else { const Quantum *magick_restrict p; Quantum *magick_restrict q; /* Mismatched pixel channel map. 
*/ p=cache_nexus[id]->pixels; q=clone_nexus[id]->pixels; for (x=0; x < (ssize_t) cache_info->columns; x++) { ssize_t i; if (x == (ssize_t) clone_info->columns) break; for (i=0; i < (ssize_t) clone_info->number_channels; i++) { PixelChannel channel; PixelTrait traits; channel=clone_info->channel_map[i].channel; traits=cache_info->channel_map[channel].traits; if (traits != UndefinedPixelTrait) *q=*(p+cache_info->channel_map[channel].offset); q++; } p+=cache_info->number_channels; } } status=WritePixelCachePixels(clone_info,clone_nexus[id],exception); } if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) { /* Clone metacontent. */ length=(size_t) MagickMin(cache_info->metacontent_extent, clone_info->metacontent_extent); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ cache_number_threads(cache_info,clone_info,cache_info->rows,1) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y, cache_info->columns,1,MagickFalse,cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y, clone_info->columns,1,MagickFalse,clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; if ((clone_nexus[id]->metacontent != (void *) NULL) && (cache_nexus[id]->metacontent != (void *) NULL)) (void) memcpy(clone_nexus[id]->metacontent, cache_nexus[id]->metacontent,length*sizeof(unsigned char)); status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception); } } clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads); cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads); if (cache_info->debug != MagickFalse) { char message[MagickPathExtent]; (void) FormatLocaleString(message,MagickPathExtent,"%s => %s", CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type), CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type)); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixelCache() deallocates memory associated with the pixel cache. % % The format of the DestroyImagePixelCache() method is: % % void DestroyImagePixelCache(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void DestroyImagePixelCache(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->cache != (void *) NULL) image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixels() deallocates memory associated with the pixel cache. 
% % The format of the DestroyImagePixels() method is: % % void DestroyImagePixels(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImagePixels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL) { cache_info->methods.destroy_pixel_handler(image); return; } image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCache() deallocates memory associated with the pixel cache. % % The format of the DestroyPixelCache() method is: % % Cache DestroyPixelCache(Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info) { int status; status=(-1); if (cache_info->file != -1) { status=close(cache_info->file); cache_info->file=(-1); RelinquishMagickResource(FileResource,1); } return(status == -1 ? MagickFalse : MagickTrue); } static inline void RelinquishPixelCachePixels(CacheInfo *cache_info) { switch (cache_info->type) { case MemoryCache: { #if defined(MAGICKCORE_OPENCL_SUPPORT) if (cache_info->opencl != (MagickCLCacheInfo) NULL) { cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl, MagickTrue); cache_info->pixels=(Quantum *) NULL; break; } #endif if (cache_info->mapped == MagickFalse) cache_info->pixels=(Quantum *) RelinquishAlignedMemory( cache_info->pixels); else (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); RelinquishMagickResource(MemoryResource,cache_info->length); break; } case MapCache: { (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); cache_info->pixels=(Quantum *) NULL; if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode)) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(MapResource,cache_info->length); } case DiskCache: { if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode)) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(DiskResource,cache_info->length); break; } case DistributedCache: { *cache_info->cache_filename='\0'; (void) RelinquishDistributePixelCache((DistributeCacheInfo *) cache_info->server_info); break; } default: break; } cache_info->type=UndefinedCache; cache_info->mapped=MagickFalse; cache_info->metacontent=(void *) NULL; } MagickPrivate Cache DestroyPixelCache(Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count--; if (cache_info->reference_count != 0) { 
UnlockSemaphoreInfo(cache_info->semaphore); return((Cache) NULL); } UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->debug != MagickFalse) { char message[MagickPathExtent]; (void) FormatLocaleString(message,MagickPathExtent,"destroy %s", cache_info->filename); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } RelinquishPixelCachePixels(cache_info); if (cache_info->server_info != (DistributeCacheInfo *) NULL) cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *) cache_info->server_info); if (cache_info->nexus_info != (NexusInfo **) NULL) cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info, cache_info->number_threads); if (cache_info->random_info != (RandomInfo *) NULL) cache_info->random_info=DestroyRandomInfo(cache_info->random_info); if (cache_info->file_semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&cache_info->file_semaphore); if (cache_info->semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&cache_info->semaphore); cache_info->signature=(~MagickCoreSignature); cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info); cache=(Cache) NULL; return(cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCacheNexus() destroys a pixel cache nexus. % % The format of the DestroyPixelCacheNexus() method is: % % NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info, % const size_t number_threads) % % A description of each parameter follows: % % o nexus_info: the nexus to destroy. % % o number_threads: the number of nexus threads. % */ static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info) { if (nexus_info->mapped == MagickFalse) (void) RelinquishAlignedMemory(nexus_info->cache); else (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length); nexus_info->cache=(Quantum *) NULL; nexus_info->pixels=(Quantum *) NULL; nexus_info->metacontent=(void *) NULL; nexus_info->length=0; nexus_info->mapped=MagickFalse; } MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info, const size_t number_threads) { ssize_t i; assert(nexus_info != (NexusInfo **) NULL); for (i=0; i < (ssize_t) (2*number_threads); i++) { if (nexus_info[i]->cache != (Quantum *) NULL) RelinquishCacheNexusPixels(nexus_info[i]); nexus_info[i]->signature=(~MagickCoreSignature); } *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info); nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info); return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontent() returns the authentic metacontent corresponding % with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the associated pixels are not available. % % The format of the GetAuthenticMetacontent() method is: % % void *GetAuthenticMetacontent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport void *GetAuthenticMetacontent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_metacontent_from_handler != (GetAuthenticMetacontentFromHandler) NULL) { void *metacontent; metacontent=cache_info->methods. get_authentic_metacontent_from_handler(image); return(metacontent); } assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontentFromCache() returns the meta-content corresponding % with the last call to QueueAuthenticPixelsCache() or % GetAuthenticPixelsCache(). % % The format of the GetAuthenticMetacontentFromCache() method is: % % void *GetAuthenticMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void *GetAuthenticMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL % operations. % % The format of the GetAuthenticOpenCLBuffer() method is: % % cl_mem GetAuthenticOpenCLBuffer(const Image *image, % MagickCLDevice device,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o device: the device to use. % % o exception: return any errors or warnings in this structure. 
% */ MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image, MagickCLDevice device,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(device != (const MagickCLDevice) NULL); cache_info=(CacheInfo *) image->cache; if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1)) { SyncImagePixelCache((Image *) image,exception); cache_info=(CacheInfo *) image->cache; } if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse)) return((cl_mem) NULL); LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->opencl != (MagickCLCacheInfo) NULL) && (cache_info->opencl->device->context != device->context)) cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl); if (cache_info->opencl == (MagickCLCacheInfo) NULL) { assert(cache_info->pixels != (Quantum *) NULL); cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels, cache_info->length); } if (cache_info->opencl != (MagickCLCacheInfo) NULL) RetainOpenCLMemObject(cache_info->opencl->buffer); UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->opencl == (MagickCLCacheInfo) NULL) return((cl_mem) NULL); assert(cache_info->opencl->pixels == cache_info->pixels); return(cache_info->opencl->buffer); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or % disk pixel cache as defined by the geometry parameters. A pointer to the % pixels is returned if the pixels are transferred, otherwise a NULL is % returned. % % The format of the GetAuthenticPixelCacheNexus() method is: % % Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to return. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; Quantum *magick_restrict pixels; /* Transfer pixels from the cache. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue, nexus_info,exception); if (pixels == (Quantum *) NULL) return((Quantum *) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (nexus_info->authentic_pixel_cache != MagickFalse) return(pixels); if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); if (cache_info->metacontent_extent != 0) if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsFromCache() returns the pixels associated with the last % call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods. % % The format of the GetAuthenticPixelsFromCache() method is: % % Quantum *GetAuthenticPixelsFromCache(const Image image) % % A description of each parameter follows: % % o image: the image. % */ static Quantum *GetAuthenticPixelsFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelQueue() returns the authentic pixels associated % corresponding with the last call to QueueAuthenticPixels() or % GetAuthenticPixels(). % % The format of the GetAuthenticPixelQueue() method is: % % Quantum *GetAuthenticPixelQueue(const Image image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Quantum *GetAuthenticPixelQueue(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) return(cache_info->methods.get_authentic_pixels_from_handler(image)); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixels() obtains a pixel region for read/write access. If the % region is successfully accessed, a pointer to a Quantum array % representing the region is returned, otherwise NULL is returned. % % The returned pointer may point to a temporary working copy of the pixels % or it may point to the original pixels in memory. 
Performance is maximized % if the selected region is part of one row, or one or more full rows, since % then there is opportunity to access the pixels in-place (without a copy) % if the image is in memory, or in a memory-mapped file. The returned pointer % must *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image has corresponding metacontent,call % GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the % meta-content corresponding to the region. Once the Quantum array has % been updated, the changes must be saved back to the underlying image using % SyncAuthenticPixels() or they may be lost. % % The format of the GetAuthenticPixels() method is: % % Quantum *GetAuthenticPixels(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *pixels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) { pixels=cache_info->methods.get_authentic_pixels_handler(image,x,y,columns, rows,exception); return(pixels); } assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache % as defined by the geometry parameters. A pointer to the pixels is returned % if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetAuthenticPixelsCache() method is: % % Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
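%
%  As a caller-side illustration of the read/modify/sync workflow that this
%  handler ultimately services, a minimal sketch (assuming `image',
%  `exception', and a row index `y' are already valid; error handling is
%  abbreviated):
%
%      MagickBooleanType status=MagickTrue;
%      Quantum *q;
%      ssize_t x;
%
%      q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
%      if (q == (Quantum *) NULL)
%        status=MagickFalse;
%      else
%        {
%          for (x=0; x < (ssize_t) image->columns; x++)
%          {
%            SetPixelRed(image,QuantumRange,q);
%            q+=GetPixelChannels(image);
%          }
%          status=SyncAuthenticPixels(image,exception);
%        }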
% */ static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return((Quantum *) NULL); assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageExtent() returns the extent of the pixels associated corresponding % with the last call to QueueAuthenticPixels() or GetAuthenticPixels(). % % The format of the GetImageExtent() method is: % % MagickSizeType GetImageExtent(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickSizeType GetImageExtent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCache() ensures that there is only a single reference to the % pixel cache to be modified, updating the provided cache pointer to point to % a clone of the original pixel cache if necessary. % % The format of the GetImagePixelCache method is: % % Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o clone: any value other than MagickFalse clones the cache pixels. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType ValidatePixelCacheMorphology( const Image *magick_restrict image) { const CacheInfo *magick_restrict cache_info; const PixelChannelMap *magick_restrict p, *magick_restrict q; /* Does the image match the pixel cache morphology? 
*/ cache_info=(CacheInfo *) image->cache; p=image->channel_map; q=cache_info->channel_map; if ((image->storage_class != cache_info->storage_class) || (image->colorspace != cache_info->colorspace) || (image->alpha_trait != cache_info->alpha_trait) || (image->channels != cache_info->channels) || (image->columns != cache_info->columns) || (image->rows != cache_info->rows) || (image->number_channels != cache_info->number_channels) || (memcmp(p,q,image->number_channels*sizeof(*p)) != 0) || (image->metacontent_extent != cache_info->metacontent_extent) || (cache_info->nexus_info == (NexusInfo **) NULL)) return(MagickFalse); return(MagickTrue); } static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickBooleanType destroy, status; static MagickSizeType cache_timelimit = MagickResourceInfinity, cpu_throttle = MagickResourceInfinity, cycles = 0; status=MagickTrue; if (cpu_throttle == MagickResourceInfinity) cpu_throttle=GetMagickResourceLimit(ThrottleResource); if ((cpu_throttle != 0) && ((cycles++ % 32) == 0)) MagickDelay(cpu_throttle); if (cache_epoch == 0) { /* Set the expire time in seconds. */ cache_timelimit=GetMagickResourceLimit(TimeResource); cache_epoch=GetMagickTime(); } if ((cache_timelimit != MagickResourceInfinity) && ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit)) { #if defined(ECANCELED) errno=ECANCELED; #endif cache_info=(CacheInfo *) image->cache; if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded"); } LockSemaphoreInfo(image->semaphore); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif destroy=MagickFalse; if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { CacheInfo *clone_info; Image clone_image; /* Clone pixel cache. */ clone_image=(*image); clone_image.semaphore=AcquireSemaphoreInfo(); clone_image.reference_count=1; clone_image.cache=ClonePixelCache(cache_info); clone_info=(CacheInfo *) clone_image.cache; status=OpenPixelCache(&clone_image,IOMode,exception); if (status == MagickFalse) clone_info=(CacheInfo *) DestroyPixelCache(clone_info); else { if (clone != MagickFalse) status=ClonePixelCacheRepository(clone_info,cache_info, exception); if (status == MagickFalse) clone_info=(CacheInfo *) DestroyPixelCache(clone_info); else { destroy=MagickTrue; image->cache=clone_info; } } RelinquishSemaphoreInfo(&clone_image.semaphore); } UnlockSemaphoreInfo(cache_info->semaphore); } if (destroy != MagickFalse) cache_info=(CacheInfo *) DestroyPixelCache(cache_info); if (status != MagickFalse) { /* Ensure the image matches the pixel cache morphology. 
*/ if (image->type != UndefinedType) image->type=UndefinedType; if (ValidatePixelCacheMorphology(image) == MagickFalse) { status=OpenPixelCache(image,IOMode,exception); cache_info=(CacheInfo *) image->cache; if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); } } UnlockSemaphoreInfo(image->semaphore); if (status == MagickFalse) return((Cache) NULL); return(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCacheType() returns the pixel cache type: UndefinedCache, % DiskCache, MemoryCache, MapCache, or PingCache. % % The format of the GetImagePixelCacheType() method is: % % CacheType GetImagePixelCacheType(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport CacheType GetImagePixelCacheType(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->type); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e A u t h e n t i c P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixel() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixel() method is: % % MagickBooleanType GetOneAuthenticPixel(const Image image,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. 
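%
%  A minimal usage sketch (illustrative only; `image' and `exception' are
%  assumed to be a valid Image and ExceptionInfo, and the (0,0) coordinates
%  are arbitrary):
%
%      Quantum pixel[MaxPixelChannels];
%
%      if (GetOneAuthenticPixel(image,0,0,pixel,exception) != MagickFalse)
%        (void) printf("red: %g\n",(double) pixel[RedPixelChannel]);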
% */ static inline MagickBooleanType CopyPixel(const Image *image, const Quantum *source,Quantum *destination) { ssize_t i; if (source == (const Quantum *) NULL) { destination[RedPixelChannel]=ClampToQuantum(image->background_color.red); destination[GreenPixelChannel]=ClampToQuantum( image->background_color.green); destination[BluePixelChannel]=ClampToQuantum( image->background_color.blue); destination[BlackPixelChannel]=ClampToQuantum( image->background_color.black); destination[AlphaPixelChannel]=ClampToQuantum( image->background_color.alpha); return(MagickFalse); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); destination[channel]=source[i]; } return(MagickTrue); } MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; Quantum *magick_restrict q; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,pixel,exception)); q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e A u t h e n t i c P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixelFromCache() method is: % % MagickBooleanType GetOneAuthenticPixelFromCache(const Image image, % const ssize_t x,const ssize_t y,Quantum *pixel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict q; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id], exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixel() returns a single virtual pixel at the specified % (x,y) location. The image background color is returned if an error occurs. % If you plan to modify the pixel, use GetOneAuthenticPixel() instead. 
%
%  The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y: These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
       (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   O n e   V i r t u a l   P i x e l   F r o m   C a c h e           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelFromCache() returns a single virtual pixel at the
%  specified (x,y) location. The image background color is returned if an
%  error occurs.
%
%  The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y: These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   O n e   V i r t u a l   P i x e l   I n f o                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
%  location. The image background color is returned if an error occurs.
If % you plan to modify the pixel, use GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualPixelInfo() method is: % % MagickBooleanType GetOneVirtualPixelInfo(const Image image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,PixelInfo *pixel,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: these values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, PixelInfo *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); GetPixelInfo(image,pixel); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL, cache_info->nexus_info[id],exception); if (p == (const Quantum *) NULL) return(MagickFalse); GetPixelInfoPixel(image,p,pixel); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheColorspace() returns the colorspace of the pixel cache. % % The format of the GetPixelCacheColorspace() method is: % % Colorspace GetPixelCacheColorspace(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheFilename() returns the filename associated with the pixel % cache. % % The format of the GetPixelCacheFilename() method is: % % const char *GetPixelCacheFilename(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const char *GetPixelCacheFilename(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->cache_filename); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheMethods() initializes the CacheMethods structure. 
% % The format of the GetPixelCacheMethods() method is: % % void GetPixelCacheMethods(CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache_methods: Specifies a pointer to a CacheMethods structure. % */ MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods) { assert(cache_methods != (CacheMethods *) NULL); (void) memset(cache_methods,0,sizeof(*cache_methods)); cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache; cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache; cache_methods->get_virtual_metacontent_from_handler= GetVirtualMetacontentFromCache; cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache; cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache; cache_methods->get_authentic_metacontent_from_handler= GetAuthenticMetacontentFromCache; cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache; cache_methods->get_one_authentic_pixel_from_handler= GetOneAuthenticPixelFromCache; cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache; cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache; cache_methods->destroy_pixel_handler=DestroyImagePixelCache; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e N e x u s E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheNexusExtent() returns the extent of the pixels associated % corresponding with the last call to SetPixelCacheNexusPixels() or % GetPixelCacheNexusPixels(). % % The format of the GetPixelCacheNexusExtent() method is: % % MagickSizeType GetPixelCacheNexusExtent(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o nexus_info: the nexus info. % */ MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; MagickSizeType extent; assert(cache != NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height; if (extent == 0) return((MagickSizeType) cache_info->columns*cache_info->rows); return(extent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCachePixels() returns the pixels associated with the specified image. % % The format of the GetPixelCachePixels() method is: % % void *GetPixelCachePixels(Image *image,MagickSizeType *length, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. 
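%
%  A minimal usage sketch (illustrative only); the returned pointer is only
%  meaningful for memory- or map-backed caches, so a NULL check is required:
%
%      MagickSizeType length;
%      void *pixels;
%
%      pixels=GetPixelCachePixels(image,&length,exception);
%      if (pixels != (void *) NULL)
%        (void) printf("cache holds %g bytes in memory\n",(double) length);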
% */ MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); assert(length != (MagickSizeType *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=cache_info->length; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); return((void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheStorageClass() returns the class type of the pixel cache. % % The format of the GetPixelCacheStorageClass() method is: % % ClassType GetPixelCacheStorageClass(Cache cache) % % A description of each parameter follows: % % o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass. % % o cache: the pixel cache. % */ MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e T i l e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheTileSize() returns the pixel cache tile size. % % The format of the GetPixelCacheTileSize() method is: % % void GetPixelCacheTileSize(const Image *image,size_t *width, % size_t *height) % % A description of each parameter follows: % % o image: the image. % % o width: the optimized cache tile width in pixels. % % o height: the optimized cache tile height in pixels. % */ MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width, size_t *height) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *width=2048UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum)); if (GetImagePixelCacheType(image) == DiskCache) *width=8192UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum)); *height=(*width); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e V i r t u a l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the % pixel cache. A virtual pixel is any pixel access that is outside the % boundaries of the image cache. % % The format of the GetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) % % A description of each parameter follows: % % o image: the image. 
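%
%  The method returned here is normally chosen by the caller beforehand via
%  SetImageVirtualPixelMethod(); a hedged sketch of how that changes
%  out-of-bounds reads (the 3x3 request below starts at (-1,-1)):
%
%      const Quantum *p;
%
%      (void) SetImageVirtualPixelMethod(image,TileVirtualPixelMethod,
%        exception);
%      p=GetVirtualPixels(image,-1,-1,3,3,exception);
%
%  With TileVirtualPixelMethod the out-of-range samples wrap around the image
%  instead of clamping to the nearest edge pixel.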
% */ MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->virtual_pixel_method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromCache() returns the meta-content corresponding with % the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualMetacontentFromCache() method is: % % void *GetVirtualMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const void *GetVirtualMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromNexus() returns the meta-content for the specified % cache nexus. % % The format of the GetVirtualMetacontentFromNexus() method is: % % const void *GetVirtualMetacontentFromNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the meta-content. % */ MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((void *) NULL); return(nexus_info->metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontent() returns the virtual metacontent corresponding with % the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the meta-content are not available. % % The format of the GetVirtualMetacontent() method is: % % const void *GetVirtualMetacontent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
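%
%  A minimal usage sketch (illustrative only; the metacontent pointer is only
%  meaningful after a region has been requested and when the image actually
%  carries metacontent):
%
%      const Quantum *p;
%      const void *metacontent;
%
%      p=GetVirtualPixels(image,0,0,image->columns,1,exception);
%      metacontent=GetVirtualMetacontent(image);
%
%  The metacontent, when non-NULL, parallels the pixel region returned by the
%  preceding GetVirtualPixels() call.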
% */ MagickExport const void *GetVirtualMetacontent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image); if (metacontent != (void *) NULL) return(metacontent); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk % pixel cache as defined by the geometry parameters. A pointer to the pixels % is returned if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetVirtualPixelCacheNexus() method is: % % Quantum *GetVirtualPixelCacheNexus(const Image *image, % const VirtualPixelMethod method,const ssize_t x,const ssize_t y, % const size_t columns,const size_t rows,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to acquire. % % o exception: return any errors or warnings in this structure. % */ static ssize_t DitherMatrix[64] = { 0, 48, 12, 60, 3, 51, 15, 63, 32, 16, 44, 28, 35, 19, 47, 31, 8, 56, 4, 52, 11, 59, 7, 55, 40, 24, 36, 20, 43, 27, 39, 23, 2, 50, 14, 62, 1, 49, 13, 61, 34, 18, 46, 30, 33, 17, 45, 29, 10, 58, 6, 54, 9, 57, 5, 53, 42, 26, 38, 22, 41, 25, 37, 21 }; static inline ssize_t DitherX(const ssize_t x,const size_t columns) { ssize_t index; index=x+DitherMatrix[x & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) columns) return((ssize_t) columns-1L); return(index); } static inline ssize_t DitherY(const ssize_t y,const size_t rows) { ssize_t index; index=y+DitherMatrix[y & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) rows) return((ssize_t) rows-1L); return(index); } static inline ssize_t EdgeX(const ssize_t x,const size_t columns) { if (x < 0L) return(0L); if (x >= (ssize_t) columns) return((ssize_t) (columns-1)); return(x); } static inline ssize_t EdgeY(const ssize_t y,const size_t rows) { if (y < 0L) return(0L); if (y >= (ssize_t) rows) return((ssize_t) (rows-1)); return(y); } static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns) { return((ssize_t) (columns*GetPseudoRandomValue(random_info))); } static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows) { return((ssize_t) (rows*GetPseudoRandomValue(random_info))); } static inline MagickModulo VirtualPixelModulo(const ssize_t offset, const size_t extent) { MagickModulo modulo; modulo.quotient=offset/((ssize_t) extent); modulo.remainder=offset % ((ssize_t) extent); if ((modulo.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0)) { modulo.quotient-=1; modulo.remainder+=((ssize_t) extent); } return(modulo); } MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image, const VirtualPixelMethod 
virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickOffsetType offset; MagickSizeType length, number_pixels; NexusInfo *magick_restrict virtual_nexus; Quantum *magick_restrict pixels, virtual_pixel[MaxPixelChannels]; const Quantum *magick_restrict p; const void *magick_restrict r; Quantum *magick_restrict q; ssize_t i, u; unsigned char *magick_restrict s; ssize_t v; void *magick_restrict virtual_metacontent; /* Acquire pixels. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return((const Quantum *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows, ((image->channels & WriteMaskChannel) != 0) || ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse, nexus_info,exception); if (pixels == (Quantum *) NULL) return((const Quantum *) NULL); q=pixels; offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+ nexus_info->region.width-1L; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels)) if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) && (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows)) { MagickBooleanType status; /* Pixel request is inside cache extents. */ if (nexus_info->authentic_pixel_cache != MagickFalse) return(q); status=ReadPixelCachePixels(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); if (cache_info->metacontent_extent != 0) { status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); } return(q); } /* Pixel request is outside cache extents. */ virtual_nexus=nexus_info->virtual_nexus; s=(unsigned char *) nexus_info->metacontent; (void) memset(virtual_pixel,0,cache_info->number_channels* sizeof(*virtual_pixel)); virtual_metacontent=(void *) NULL; switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: case EdgeVirtualPixelMethod: case CheckerTileVirtualPixelMethod: case HorizontalTileVirtualPixelMethod: case VerticalTileVirtualPixelMethod: { if (cache_info->metacontent_extent != 0) { /* Acquire a metacontent buffer. 
*/ virtual_metacontent=(void *) AcquireQuantumMemory(1, cache_info->metacontent_extent); if (virtual_metacontent == (void *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), CacheError,"UnableToGetCacheNexus","`%s'",image->filename); return((const Quantum *) NULL); } (void) memset(virtual_metacontent,0,cache_info->metacontent_extent); } switch (virtual_pixel_method) { case BlackVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case GrayVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange/2, virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case TransparentVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,TransparentAlpha,virtual_pixel); break; } case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } default: { SetPixelRed(image,ClampToQuantum(image->background_color.red), virtual_pixel); SetPixelGreen(image,ClampToQuantum(image->background_color.green), virtual_pixel); SetPixelBlue(image,ClampToQuantum(image->background_color.blue), virtual_pixel); SetPixelBlack(image,ClampToQuantum(image->background_color.black), virtual_pixel); SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha), virtual_pixel); break; } } break; } default: break; } for (v=0; v < (ssize_t) rows; v++) { ssize_t y_offset; y_offset=y+v; if ((virtual_pixel_method == EdgeVirtualPixelMethod) || (virtual_pixel_method == UndefinedVirtualPixelMethod)) y_offset=EdgeY(y_offset,cache_info->rows); for (u=0; u < (ssize_t) columns; u+=length) { ssize_t x_offset; x_offset=x+u; length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u); if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) || ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) || (length == 0)) { MagickModulo x_modulo, y_modulo; /* Transfer a single pixel. 
*/ length=(MagickSizeType) 1; switch (virtual_pixel_method) { case EdgeVirtualPixelMethod: default: { p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns), EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info, nexus_info->virtual_nexus); break; } case RandomVirtualPixelMethod: { if (cache_info->random_info == (RandomInfo *) NULL) cache_info->random_info=AcquireRandomInfo(); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, RandomX(cache_info->random_info,cache_info->columns), RandomY(cache_info->random_info,cache_info->rows),1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case DitherVirtualPixelMethod: { p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, DitherX(x_offset,cache_info->columns), DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case TileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case MirrorVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); if ((x_modulo.quotient & 0x01) == 1L) x_modulo.remainder=(ssize_t) cache_info->columns- x_modulo.remainder-1L; y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if ((y_modulo.quotient & 0x01) == 1L) y_modulo.remainder=(ssize_t) cache_info->rows- y_modulo.remainder-1L; p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case HorizontalTileEdgeVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case VerticalTileEdgeVirtualPixelMethod: { y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { p=virtual_pixel; r=virtual_metacontent; break; } case CheckerTileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L) { p=virtual_pixel; r=virtual_metacontent; break; } p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case HorizontalTileVirtualPixelMethod: { if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); 
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case VerticalTileVirtualPixelMethod: { if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } } if (p == (const Quantum *) NULL) break; (void) memcpy(q,p,(size_t) (cache_info->number_channels*length* sizeof(*p))); q+=cache_info->number_channels; if ((s != (void *) NULL) && (r != (const void *) NULL)) { (void) memcpy(s,r,(size_t) cache_info->metacontent_extent); s+=cache_info->metacontent_extent; } continue; } /* Transfer a run of pixels. */ p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset, (size_t) length,1UL,virtual_nexus,exception); if (p == (const Quantum *) NULL) break; r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); (void) memcpy(q,p,(size_t) (cache_info->number_channels*length* sizeof(*p))); q+=cache_info->number_channels*length; if ((r != (void *) NULL) && (s != (const void *) NULL)) { (void) memcpy(s,r,(size_t) length); s+=length*cache_info->metacontent_extent; } } if (u < (ssize_t) columns) break; } /* Free resources. */ if (virtual_metacontent != (void *) NULL) virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent); if (v < (ssize_t) rows) return((const Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel % cache as defined by the geometry parameters. A pointer to the pixels % is returned if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetVirtualPixelCache() method is: % % const Quantum *GetVirtualPixelCache(const Image *image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
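%
%  A caller-side sketch of the public workflow this handler backs
%  (illustrative only; read-only access, so no SyncAuthenticPixels() call is
%  needed; `y' is an assumed row index):
%
%      const Quantum *p;
%      double mean=0.0;
%      ssize_t x;
%
%      p=GetVirtualPixels(image,0,y,image->columns,1,exception);
%      if (p != (const Quantum *) NULL)
%        for (x=0; x < (ssize_t) image->columns; x++)
%        {
%          mean+=QuantumScale*GetPixelRed(image,p);
%          p+=GetPixelChannels(image);
%        }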
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[id],exception);
  return(p);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   V i r t u a l   P i x e l   Q u e u e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated with the last
%  call to QueueAuthenticPixels() or GetVirtualPixels().
%
%  The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixels_handler !=
       (GetVirtualPixelsHandler) NULL)
    return(cache_info->methods.get_virtual_pixels_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   V i r t u a l   P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixels() returns an immutable pixel region. If the
%  region is successfully accessed, a pointer to it is returned, otherwise
%  NULL is returned. The returned pointer may point to a temporary working
%  copy of the pixels or it may point to the original pixels in memory.
%  Performance is maximized if the selected region is part of one row, or one
%  or more full rows, since there is opportunity to access the pixels in-place
%  (without a copy) if the image is in memory, or in a memory-mapped file. The
%  returned pointer must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum. If the image type is CMYK or the storage class is PseudoClass,
%  call GetVirtualMetacontent() after invoking GetVirtualPixels() to
%  access the meta-content (of type void) corresponding to the
%  region.
%
%  If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
%  Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
%  safe. In a threaded environment, use GetCacheViewVirtualPixels() or
%  GetCacheViewAuthenticPixels() instead.
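%
%  For example, the thread-safe equivalent of a one-row virtual read looks
%  roughly like this (an illustrative sketch; `image', `exception', and a row
%  index `y' are assumed valid, and the pixels are consumed before the view
%  is destroyed):
%
%      CacheView *image_view;
%      const Quantum *p;
%      ssize_t opaque=0,x;
%
%      image_view=AcquireVirtualCacheView(image,exception);
%      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
%      if (p != (const Quantum *) NULL)
%        for (x=0; x < (ssize_t) image->columns; x++)
%        {
%          if (GetPixelAlpha(image,p) == OpaqueAlpha)
%            opaque++;
%          p+=GetPixelChannels(image);
%        }
%      image_view=DestroyCacheView(image_view);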
% % The format of the GetVirtualPixels() method is: % % const Quantum *GetVirtualPixels(const Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport const Quantum *GetVirtualPixels(const Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) return(cache_info->methods.get_virtual_pixel_handler(image, GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception)); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y, columns,rows,cache_info->nexus_info[id],exception); return(p); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsCache() returns the pixels associated corresponding with the % last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualPixelsCache() method is: % % Quantum *GetVirtualPixelsCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const Quantum *GetVirtualPixelsCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsNexus() returns the pixels associated with the specified % cache nexus. % % The format of the GetVirtualPixelsNexus() method is: % % const Quantum *GetVirtualPixelsNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the colormap pixels. 
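%
%  Internal callers typically use this to fetch the pixels most recently
%  attached to a per-thread nexus, mirroring GetVirtualPixelsCache() above
%  (an illustrative sketch; `cache_info' and `id' as declared in that
%  function):
%
%      p=GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]);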
% */ MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((Quantum *) NULL); return((const Quantum *) nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a s k P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MaskPixelCacheNexus() masks the cache nexus as defined by the composite mask. % The method returns MagickTrue if the pixel region is masked, otherwise % MagickFalse. % % The format of the MaskPixelCacheNexus() method is: % % MagickBooleanType MaskPixelCacheNexus(Image *image, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to clip. % % o exception: return any errors or warnings in this structure. % */ static inline Quantum ApplyPixelCompositeMask(const Quantum p, const MagickRealType alpha,const Quantum q,const MagickRealType beta) { double mask_alpha; Quantum pixel; if (fabs(alpha-OpaqueAlpha) < MagickEpsilon) return(p); mask_alpha=1.0-QuantumScale*QuantumScale*alpha*beta; mask_alpha=PerceptibleReciprocal(mask_alpha); pixel=ClampToQuantum(mask_alpha*MagickOver_((double) p,alpha,(double) q, beta)); return(pixel); } static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; Quantum *magick_restrict p, *magick_restrict q; ssize_t y; /* Apply composite mask. */ if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->channels & CompositeMaskChannel) == 0) return(MagickTrue); if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0)) return(MagickTrue); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return(MagickFalse); p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y, nexus_info->region.width,nexus_info->region.height, nexus_info->virtual_nexus,exception); q=nexus_info->pixels; if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickFalse); for (y=0; y < (ssize_t) nexus_info->region.height; y++) { ssize_t x; for (x=0; x < (ssize_t) nexus_info->region.width; x++) { double mask_alpha; ssize_t i; mask_alpha=(double) GetPixelCompositeMask(image,p); for (i=0; i < (ssize_t) image->number_channels; i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],(MagickRealType) GetPixelAlpha(image,q)); } p+=GetPixelChannels(image); q+=GetPixelChannels(image); } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p e n P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpenPixelCache() allocates the pixel cache. This includes defining the cache % dimensions, allocating space for the image pixels and optionally the % metacontent, and memory mapping the cache if it is disk based. The cache % nexus array is initialized as well. 
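%
%  Which backing store gets used depends on the configured resource limits at
%  the time the cache is opened; for example, lowering the memory and map
%  limits typically pushes subsequent images onto a disk cache (an
%  illustrative sketch only; `image_info' is an assumed ImageInfo):
%
%      (void) SetMagickResourceLimit(MemoryResource,0);
%      (void) SetMagickResourceLimit(MapResource,0);
%      image=ReadImage(image_info,exception);
%      if (GetImagePixelCacheType(image) == DiskCache)
%        (void) printf("pixel cache staged on disk\n");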
% % The format of the OpenPixelCache() method is: % % MagickBooleanType OpenPixelCache(Image *image,const MapMode mode, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o mode: ReadMode, WriteMode, or IOMode. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info, const MapMode mode) { int file; /* Open pixel cache on disk. */ if ((cache_info->file != -1) && (cache_info->disk_mode == mode)) return(MagickTrue); /* cache already open and in the proper mode */ if (*cache_info->cache_filename == '\0') file=AcquireUniqueFileResource(cache_info->cache_filename); else switch (mode) { case ReadMode: { file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0); break; } case WriteMode: { file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE); break; } case IOMode: default: { file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE); break; } } if (file == -1) return(MagickFalse); (void) AcquireMagickResource(FileResource,1); if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); cache_info->file=file; cache_info->disk_mode=mode; return(MagickTrue); } static inline MagickOffsetType WritePixelCacheRegion( const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,const unsigned char *magick_restrict buffer) { MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PWRITE) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PWRITE) count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX)); #else count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX),offset+i); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length) { CacheInfo *magick_restrict cache_info; MagickOffsetType count, extent, offset; cache_info=(CacheInfo *) image->cache; if (image->debug != MagickFalse) { char format[MagickPathExtent], message[MagickPathExtent]; (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format); (void) FormatLocaleString(message,MagickPathExtent, "extend %s (%s[%d], disk, %s)",cache_info->filename, cache_info->cache_filename,cache_info->file,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } if (length != (MagickSizeType) ((MagickOffsetType) length)) return(MagickFalse); offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END); if (offset < 0) return(MagickFalse); if ((MagickSizeType) offset >= length) count=(MagickOffsetType) 1; else { extent=(MagickOffsetType) length-1; count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *) ""); if (count != 1) return(MagickFalse); #if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE) if (cache_info->synchronize != MagickFalse) if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0) return(MagickFalse); #endif } offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET); if (offset < 0) return(MagickFalse); return(MagickTrue); } static MagickBooleanType OpenPixelCache(Image *image,const MapMode 
mode, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info, source_info; char format[MagickPathExtent], message[MagickPathExtent]; const char *hosts, *type; MagickBooleanType status; MagickSizeType length, number_pixels; size_t columns, packet_size; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (cache_anonymous_memory < 0) { char *value; /* Does the security policy require anonymous mapping for pixel cache? */ cache_anonymous_memory=0; value=GetPolicyValue("pixel-cache-memory"); if (value == (char *) NULL) value=GetPolicyValue("cache:memory-map"); if (LocaleCompare(value,"anonymous") == 0) { #if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS) cache_anonymous_memory=1; #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"DelegateLibrarySupportNotBuiltIn", "'%s' (policy requires anonymous memory mapping)",image->filename); #endif } value=DestroyString(value); } if ((image->columns == 0) || (image->rows == 0)) ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (((MagickSizeType) image->columns > cache_info->width_limit) || ((MagickSizeType) image->rows > cache_info->height_limit)) ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit", image->filename); if (GetMagickResourceLimit(ListLengthResource) != MagickResourceInfinity) { length=GetImageListLength(image); if (AcquireMagickResource(ListLengthResource,length) == MagickFalse) ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit", image->filename); } source_info=(*cache_info); source_info.file=(-1); (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]", image->filename,(double) image->scene); cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->alpha_trait=image->alpha_trait; cache_info->channels=image->channels; cache_info->rows=image->rows; cache_info->columns=image->columns; InitializePixelChannelMap(image); cache_info->number_channels=GetPixelChannels(image); (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels* sizeof(*image->channel_map)); cache_info->metacontent_extent=image->metacontent_extent; cache_info->mode=mode; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; packet_size=cache_info->number_channels*sizeof(Quantum); if (image->metacontent_extent != 0) packet_size+=cache_info->metacontent_extent; length=number_pixels*packet_size; columns=(size_t) (length/cache_info->rows/packet_size); if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) || ((ssize_t) cache_info->rows < 0)) ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed", image->filename); cache_info->length=length; if (image->ping != MagickFalse) { cache_info->type=PingCache; return(MagickTrue); } status=AcquireMagickResource(AreaResource,(MagickSizeType) cache_info->columns*cache_info->rows); if (cache_info->mode == PersistMode) status=MagickFalse; length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length)) && ((cache_info->type == UndefinedCache) || (cache_info->type == MemoryCache))) { 
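      /*
        Attempt a memory-based pixel cache: acquire the memory resource and
        allocate the pixels from the heap or an anonymous mapping; on
        failure fall through to the distributed and disk paths below.
      */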
status=AcquireMagickResource(MemoryResource,cache_info->length); if (status != MagickFalse) { status=MagickTrue; if (cache_anonymous_memory <= 0) { cache_info->mapped=MagickFalse; cache_info->pixels=(Quantum *) MagickAssumeAligned( AcquireAlignedMemory(1,(size_t) cache_info->length)); } else { cache_info->mapped=MagickTrue; cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t) cache_info->length); } if (cache_info->pixels == (Quantum *) NULL) { cache_info->mapped=source_info.mapped; cache_info->pixels=source_info.pixels; } else { /* Create memory pixel cache. */ cache_info->type=MemoryCache; cache_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) cache_info->metacontent=(void *) (cache_info->pixels+ cache_info->number_channels*number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } cache_info->storage_class=image->storage_class; if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } } status=AcquireMagickResource(DiskResource,cache_info->length); hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts", exception); if ((status == MagickFalse) && (hosts != (const char *) NULL)) { DistributeCacheInfo *server_info; /* Distribute the pixel cache to a remote server. */ server_info=AcquireDistributeCacheInfo(exception); if (server_info != (DistributeCacheInfo *) NULL) { status=OpenDistributePixelCache(server_info,image); if (status == MagickFalse) { ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", GetDistributeCacheHostname(server_info)); server_info=DestroyDistributeCacheInfo(server_info); } else { /* Create a distributed pixel cache. 
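            Pixel reads and writes for this image are forwarded to the
            remote cache server recorded in server_info rather than served
            from local memory or disk.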
*/ status=MagickTrue; cache_info->type=DistributedCache; cache_info->server_info=server_info; (void) FormatLocaleString(cache_info->cache_filename, MagickPathExtent,"%s:%d",GetDistributeCacheHostname( (DistributeCacheInfo *) cache_info->server_info), GetDistributeCachePort((DistributeCacheInfo *) cache_info->server_info)); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, GetDistributeCacheFile((DistributeCacheInfo *) cache_info->server_info),type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } /* Create pixel cache on disk. */ if (status == MagickFalse) { cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) && (cache_info->mode != PersistMode)) { (void) ClosePixelCacheOnDisk(cache_info); *cache_info->cache_filename='\0'; } if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse) { cache_info->type=UndefinedCache; ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", image->filename); return(MagickFalse); } status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+ cache_info->length); if (status == MagickFalse) { cache_info->type=UndefinedCache; ThrowFileException(exception,CacheError,"UnableToExtendCache", image->filename); return(MagickFalse); } cache_info->type=DiskCache; length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if (length == (MagickSizeType) ((size_t) length)) { status=AcquireMagickResource(MapResource,cache_info->length); if (status != MagickFalse) { cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode, cache_info->offset,(size_t) cache_info->length); if (cache_info->pixels == (Quantum *) NULL) { cache_info->mapped=source_info.mapped; cache_info->pixels=source_info.pixels; RelinquishMagickResource(MapResource,cache_info->length); } else { /* Create file-backed memory-mapped pixel cache. 
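            The cache file just created is mapped into the address space
            with MapBlob(), so pixel access proceeds through ordinary
            memory operations and the file descriptor can be closed again.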
*/ (void) ClosePixelCacheOnDisk(cache_info); cache_info->type=MapCache; cache_info->mapped=MagickTrue; cache_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) cache_info->metacontent=(void *) (cache_info->pixels+ cache_info->number_channels*number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, cache_info->file,type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } } status=MagickTrue; if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info,exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename, cache_info->cache_filename,cache_info->file,type,(double) cache_info->columns,(double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P e r s i s t P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PersistPixelCache() attaches to or initializes a persistent pixel cache. A % persistent pixel cache is one that resides on disk and is not destroyed % when the program exits. % % The format of the PersistPixelCache() method is: % % MagickBooleanType PersistPixelCache(Image *image,const char *filename, % const MagickBooleanType attach,MagickOffsetType *offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o filename: the persistent pixel cache filename. % % o attach: A value other than zero initializes the persistent pixel cache. % % o initialize: A value other than zero initializes the persistent pixel % cache. % % o offset: the offset in the persistent cache to store pixels. % % o exception: return any errors or warnings in this structure. 
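%
%  A minimal sketch of attaching an image to an existing persistent cache
%  (the filename and starting offset are illustrative only):
%
%      MagickOffsetType
%        offset = 0;
%
%      if (PersistPixelCache(image,"image.cache",MagickTrue,&offset,
%            exception) == MagickFalse)
%        return(MagickFalse);
%
%  On success the offset is advanced, rounded up to the page size, past the
%  pixels of this image so a subsequent image can be appended to the same
%  cache file.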
% */ MagickExport MagickBooleanType PersistPixelCache(Image *image, const char *filename,const MagickBooleanType attach,MagickOffsetType *offset, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info, *magick_restrict clone_info; MagickBooleanType status; ssize_t page_size; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (void *) NULL); assert(filename != (const char *) NULL); assert(offset != (MagickOffsetType *) NULL); page_size=GetMagickPageSize(); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif if (attach != MagickFalse) { /* Attach existing persistent pixel cache. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "attach persistent cache"); (void) CopyMagickString(cache_info->cache_filename,filename, MagickPathExtent); cache_info->type=MapCache; cache_info->offset=(*offset); if (OpenPixelCache(image,ReadMode,exception) == MagickFalse) return(MagickFalse); *offset+=cache_info->length+page_size-(cache_info->length % page_size); return(MagickTrue); } /* Clone persistent pixel cache. */ status=AcquireMagickResource(DiskResource,cache_info->length); if (status == MagickFalse) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } clone_info=(CacheInfo *) ClonePixelCache(cache_info); clone_info->type=DiskCache; (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent); clone_info->file=(-1); clone_info->storage_class=cache_info->storage_class; clone_info->colorspace=cache_info->colorspace; clone_info->alpha_trait=cache_info->alpha_trait; clone_info->channels=cache_info->channels; clone_info->columns=cache_info->columns; clone_info->rows=cache_info->rows; clone_info->number_channels=cache_info->number_channels; clone_info->metacontent_extent=cache_info->metacontent_extent; clone_info->mode=PersistMode; clone_info->length=cache_info->length; (void) memcpy(clone_info->channel_map,cache_info->channel_map, MaxPixelChannels*sizeof(*cache_info->channel_map)); clone_info->offset=(*offset); status=ClonePixelCacheRepository(clone_info,cache_info,exception); *offset+=cache_info->length+page_size-(cache_info->length % page_size); clone_info=(CacheInfo *) DestroyPixelCache(clone_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixelCacheNexus() allocates an region to store image pixels as % defined by the region rectangle and returns a pointer to the region. This % region is subsequently transferred from the pixel cache with % SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the % pixels are transferred, otherwise a NULL is returned. % % The format of the QueueAuthenticPixelCacheNexus() method is: % % Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % const MagickBooleanType clone,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to set. % % o clone: clone the pixel cache. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickOffsetType offset; MagickSizeType number_pixels; Quantum *magick_restrict pixels; /* Validate pixel cache geometry. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception); if (cache_info == (Cache) NULL) return((Quantum *) NULL); assert(cache_info->signature == MagickCoreSignature); if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) || (y < 0) || (x >= (ssize_t) cache_info->columns) || (y >= (ssize_t) cache_info->rows)) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "PixelsAreNotAuthentic","`%s'",image->filename); return((Quantum *) NULL); } offset=(MagickOffsetType) y*cache_info->columns+x; if (offset < 0) return((Quantum *) NULL); number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1; if ((MagickSizeType) offset >= number_pixels) return((Quantum *) NULL); /* Return pixel cache. */ pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows, ((image->channels & WriteMaskChannel) != 0) || ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse, nexus_info,exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u e u e A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixelsCache() allocates an region to store image pixels as % defined by the region rectangle and returns a pointer to the region. This % region is subsequently transferred from the pixel cache with % SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the % pixels are transferred, otherwise a NULL is returned. % % The format of the QueueAuthenticPixelsCache() method is: % % Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
% */ static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u e u e A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QueueAuthenticPixels() queues a mutable pixel region. If the region is % successfully initialized a pointer to a Quantum array representing the % region is returned, otherwise NULL is returned. The returned pointer may % point to a temporary working buffer for the pixels or it may point to the % final location of the pixels in memory. % % Write-only access means that any existing pixel values corresponding to % the region are ignored. This is useful if the initial image is being % created from scratch, or if the existing pixel values are to be % completely replaced without need to refer to their pre-existing values. % The application is free to read and write the pixel buffer returned by % QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not % initialize the pixel array values. Initializing pixel array values is the % application's responsibility. % % Performance is maximized if the selected region is part of one row, or % one or more full rows, since then there is opportunity to access the % pixels in-place (without a copy) if the image is in memory, or in a % memory-mapped file. The returned pointer must *never* be deallocated % by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image type is CMYK or the storage class is PseudoClass, % call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to % obtain the meta-content (of type void) corresponding to the region. % Once the Quantum (and/or Quantum) array has been updated, the % changes must be saved back to the underlying image using % SyncAuthenticPixels() or they may be lost. % % The format of the QueueAuthenticPixels() method is: % % Quantum *QueueAuthenticPixels(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
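%
%  A representative caller, sketched here with placeholder channel values,
%  queues and initializes the image one row at a time:
%
%      for (y=0; y < (ssize_t) image->rows; y++)
%      {
%        q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
%        if (q == (Quantum *) NULL)
%          break;
%        for (x=0; x < (ssize_t) image->columns; x++)
%        {
%          SetPixelRed(image,0,q);
%          SetPixelGreen(image,0,q);
%          SetPixelBlue(image,0,q);
%          q+=GetPixelChannels(image);
%        }
%        if (SyncAuthenticPixels(image,exception) == MagickFalse)
%          break;
%      }
%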
% */ MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) { pixels=cache_info->methods.queue_authentic_pixels_handler(image,x,y, columns,rows,exception); return(pixels); } assert(id < (int) cache_info->number_threads); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCacheMetacontent() reads metacontent from the specified region of % the pixel cache. % % The format of the ReadPixelCacheMetacontent() method is: % % MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the metacontent. % % o exception: return any errors or warnings in this structure. % */ static inline MagickOffsetType ReadPixelCacheRegion( const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,unsigned char *magick_restrict buffer) { MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PREAD) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PREAD) count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX)); #else count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX),offset+i); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType ReadPixelCacheMetacontent( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; ssize_t y; unsigned char *magick_restrict q; size_t rows; if (cache_info->metacontent_extent == 0) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width* cache_info->metacontent_extent; extent=length*nexus_info->region.height; rows=nexus_info->region.height; y=0; q=(unsigned char *) nexus_info->metacontent; switch (cache_info->type) { case MemoryCache: case MapCache: { unsigned char *magick_restrict p; /* Read meta-content from memory. 
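          When the nexus spans full cache rows the meta-content region is
          contiguous, so the copy below collapses to a single memcpy();
          otherwise one row is copied per loop iteration.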
*/ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=(unsigned char *) cache_info->metacontent+offset* cache_info->metacontent_extent; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->metacontent_extent*cache_info->columns; q+=cache_info->metacontent_extent*nexus_info->region.width; } break; } case DiskCache: { /* Read meta content from disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent* cache_info->number_channels*sizeof(Quantum)+offset* cache_info->metacontent_extent,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; offset+=cache_info->columns; q+=cache_info->metacontent_extent*nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read metacontent from distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=cache_info->metacontent_extent*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCachePixels() reads pixels from the specified region of the pixel % cache. % % The format of the ReadPixelCachePixels() method is: % % MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the pixels. % % o exception: return any errors or warnings in this structure. 
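%
%  The region is addressed in row-major order: the transfer starts at pixel
%  offset y*columns+x and proceeds one row of
%  number_channels*width*sizeof(Quantum) bytes at a time.  As an
%  illustration only, with a 640-column cache a region anchored at
%  (x=10,y=2) begins 1290 pixel packets into the cache.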
% */ static MagickBooleanType ReadPixelCachePixels( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; Quantum *magick_restrict q; ssize_t y; size_t number_channels, rows; if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns; if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y) return(MagickFalse); offset+=nexus_info->region.x; number_channels=cache_info->number_channels; length=(MagickSizeType) number_channels*nexus_info->region.width* sizeof(Quantum); if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width) return(MagickFalse); rows=nexus_info->region.height; extent=length*rows; if ((extent == 0) || ((extent/length) != rows)) return(MagickFalse); y=0; q=nexus_info->pixels; switch (cache_info->type) { case MemoryCache: case MapCache: { Quantum *magick_restrict p; /* Read pixels from memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=cache_info->pixels+cache_info->number_channels*offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->number_channels*cache_info->columns; q+=cache_info->number_channels*nexus_info->region.width; } break; } case DiskCache: { /* Read pixels from disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset* cache_info->number_channels*sizeof(*q),length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; offset+=cache_info->columns; q+=cache_info->number_channels*nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read pixels from distributed cache. 
*/ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCachePixels((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=cache_info->number_channels*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e f e r e n c e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferencePixelCache() increments the reference count associated with the % pixel cache returning a pointer to the cache. % % The format of the ReferencePixelCache method is: % % Cache ReferencePixelCache(Cache cache_info) % % A description of each parameter follows: % % o cache_info: the pixel cache. % */ MagickPrivate Cache ReferencePixelCache(Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache *) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count++; UnlockSemaphoreInfo(cache_info->semaphore); return(cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t P i x e l C a c h e C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetPixelCacheChannels() resets the pixel cache channels. % % The format of the ResetPixelCacheChannels method is: % % void ResetPixelCacheChannels(Image *) % % A description of each parameter follows: % % o image: the image. % */ MagickPrivate void ResetPixelCacheChannels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); cache_info->number_channels=GetPixelChannels(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t C a c h e A n o n y m o u s M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetCacheAnonymousMemory() resets the anonymous_memory value. 
% % The format of the ResetCacheAnonymousMemory method is: % % void ResetCacheAnonymousMemory(void) % */ MagickPrivate void ResetCacheAnonymousMemory(void) { cache_anonymous_memory=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t P i x e l C a c h e E p o c h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetPixelCacheEpoch() resets the pixel cache epoch. % % The format of the ResetPixelCacheEpoch method is: % % void ResetPixelCacheEpoch(void) % */ MagickPrivate void ResetPixelCacheEpoch(void) { cache_epoch=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheMethods() sets the image pixel methods to the specified ones. % % The format of the SetPixelCacheMethods() method is: % % SetPixelCacheMethods(Cache *,CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache: the pixel cache. % % o cache_methods: Specifies a pointer to a CacheMethods structure. % */ MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods) { CacheInfo *magick_restrict cache_info; GetOneAuthenticPixelFromHandler get_one_authentic_pixel_from_handler; GetOneVirtualPixelFromHandler get_one_virtual_pixel_from_handler; /* Set cache pixel methods. */ assert(cache != (Cache) NULL); assert(cache_methods != (CacheMethods *) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) cache_info->methods.get_virtual_pixel_handler= cache_methods->get_virtual_pixel_handler; if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL) cache_info->methods.destroy_pixel_handler= cache_methods->destroy_pixel_handler; if (cache_methods->get_virtual_metacontent_from_handler != (GetVirtualMetacontentFromHandler) NULL) cache_info->methods.get_virtual_metacontent_from_handler= cache_methods->get_virtual_metacontent_from_handler; if (cache_methods->get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) cache_info->methods.get_authentic_pixels_handler= cache_methods->get_authentic_pixels_handler; if (cache_methods->queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) cache_info->methods.queue_authentic_pixels_handler= cache_methods->queue_authentic_pixels_handler; if (cache_methods->sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) cache_info->methods.sync_authentic_pixels_handler= cache_methods->sync_authentic_pixels_handler; if (cache_methods->get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) cache_info->methods.get_authentic_pixels_from_handler= cache_methods->get_authentic_pixels_from_handler; if (cache_methods->get_authentic_metacontent_from_handler != (GetAuthenticMetacontentFromHandler) NULL) cache_info->methods.get_authentic_metacontent_from_handler= cache_methods->get_authentic_metacontent_from_handler; get_one_virtual_pixel_from_handler= cache_info->methods.get_one_virtual_pixel_from_handler; if (get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) cache_info->methods.get_one_virtual_pixel_from_handler= 
cache_methods->get_one_virtual_pixel_from_handler; get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; if (get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) cache_info->methods.get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e N e x u s P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheNexusPixels() defines the region of the cache for the % specified cache nexus. % % The format of the SetPixelCacheNexusPixels() method is: % % Quantum SetPixelCacheNexusPixels( % const CacheInfo *magick_restrict cache_info,const MapMode mode, % const ssize_t x,const ssize_t y,const size_t width,const size_t height, % const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o mode: ReadMode, WriteMode, or IOMode. % % o x,y,width,height: define the region of this particular cache nexus. % % o buffered: if true, nexus pixels are buffered. % % o nexus_info: the cache nexus to set. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType AcquireCacheNexusPixels( const CacheInfo *magick_restrict cache_info,const MagickSizeType length, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { if (length != (MagickSizeType) ((size_t) length)) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"PixelCacheAllocationFailed","`%s'", cache_info->filename); return(MagickFalse); } nexus_info->length=0; nexus_info->mapped=MagickFalse; if (cache_anonymous_memory <= 0) { nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1, (size_t) length)); if (nexus_info->cache != (Quantum *) NULL) (void) memset(nexus_info->cache,0,(size_t) length); } else { nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length); if (nexus_info->cache != (Quantum *) NULL) nexus_info->mapped=MagickTrue; } if (nexus_info->cache == (Quantum *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"PixelCacheAllocationFailed","`%s'", cache_info->filename); return(MagickFalse); } nexus_info->length=length; return(MagickTrue); } static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info, const MapMode mode) { if (nexus_info->length < CACHE_LINE_SIZE) return; if (mode == ReadMode) { MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE, 0,1); return; } MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,1,1); } static inline MagickBooleanType ValidatePixelRange(const ssize_t alpha, const ssize_t beta) { ssize_t gamma; gamma=alpha+beta; if (((alpha > 0) && (beta > 0) && (gamma < 0)) || ((alpha < 0) && (beta < 0) && (gamma > 0))) return(MagickFalse); return(MagickTrue); } static Quantum *SetPixelCacheNexusPixels( const CacheInfo *magick_restrict cache_info,const MapMode mode, const ssize_t x,const ssize_t y,const size_t width,const size_t height, const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickBooleanType status; MagickSizeType length, number_pixels; assert(cache_info != (const CacheInfo *) NULL); assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == 
UndefinedCache) return((Quantum *) NULL); assert(nexus_info->signature == MagickCoreSignature); (void) memset(&nexus_info->region,0,sizeof(nexus_info->region)); if ((width == 0) || (height == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "NoPixelsDefinedInCache","`%s'",cache_info->filename); return((Quantum *) NULL); } if ((width > (size_t) SSIZE_MAX) || (height > (size_t) SSIZE_MAX) || ((MagickSizeType) width > cache_info->width_limit) || ((MagickSizeType) height > cache_info->height_limit) || (ValidatePixelRange(x,(ssize_t) width) == MagickFalse) || (ValidatePixelRange(y,(ssize_t) height) == MagickFalse)) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "WidthOrHeightExceedsLimit","`%s'",cache_info->filename); return((Quantum *) NULL); } if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && (buffered == MagickFalse)) { if (((x >= 0) && (y >= 0) && (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) && (((x == 0) && (width == cache_info->columns)) || ((height == 1) && (((ssize_t) width+x-1) < (ssize_t) cache_info->columns)))) { MagickOffsetType offset; /* Pixels are accessed directly from memory. */ offset=(MagickOffsetType) y*cache_info->columns+x; nexus_info->pixels=cache_info->pixels+cache_info->number_channels* offset; nexus_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) nexus_info->metacontent=(unsigned char *) cache_info->metacontent+ offset*cache_info->metacontent_extent; nexus_info->region.width=width; nexus_info->region.height=height; nexus_info->region.x=x; nexus_info->region.y=y; nexus_info->authentic_pixel_cache=MagickTrue; PrefetchPixelCacheNexusPixels(nexus_info,mode); return(nexus_info->pixels); } } /* Pixels are stored in a staging region until they are synced to the cache. */ number_pixels=(MagickSizeType) width*height; length=MagickMax(number_pixels,MagickMax(cache_info->columns, cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels); if (cache_info->metacontent_extent != 0) length+=number_pixels*cache_info->metacontent_extent; status=MagickTrue; if (nexus_info->cache == (Quantum *) NULL) status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception); else if (nexus_info->length < length) { RelinquishCacheNexusPixels(nexus_info); status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception); } if (status == MagickFalse) return((Quantum *) NULL); nexus_info->pixels=nexus_info->cache; nexus_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) nexus_info->metacontent=(void *) (nexus_info->pixels+ cache_info->number_channels*number_pixels); nexus_info->region.width=width; nexus_info->region.height=height; nexus_info->region.x=x; nexus_info->region.y=y; nexus_info->authentic_pixel_cache=cache_info->type == PingCache ? MagickTrue : MagickFalse; PrefetchPixelCacheNexusPixels(nexus_info,mode); return(nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t P i x e l C a c h e V i r t u a l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the % pixel cache and returns the previous setting. A virtual pixel is any pixel % access that is outside the boundaries of the image cache. 
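%
%  For example, a caller that temporarily needs edge-replicated virtual
%  pixels can save and restore the previous method (a sketch for
%  illustration only):
%
%      VirtualPixelMethod
%        previous;
%
%      previous=SetPixelCacheVirtualMethod(image,EdgeVirtualPixelMethod,
%        exception);
%      ...
%      (void) SetPixelCacheVirtualMethod(image,previous,exception);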
% % The format of the SetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image, % const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; CacheView *magick_restrict image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); image->alpha_trait=BlendPixelTrait; status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); /* must be virtual */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(image,alpha,q); q+=GetPixelChannels(image); } status=SyncCacheViewAuthenticPixels(image_view,exception); } image_view=DestroyCacheView(image_view); return(status); } MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image, const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; VirtualPixelMethod method; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); method=cache_info->virtual_pixel_method; cache_info->virtual_pixel_method=virtual_pixel_method; if ((image->columns != 0) && (image->rows != 0)) switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: { if ((image->background_color.alpha_trait != UndefinedPixelTrait) && (image->alpha_trait == UndefinedPixelTrait)) (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception); if ((IsPixelInfoGray(&image->background_color) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace,exception); break; } case TransparentVirtualPixelMethod: { if (image->alpha_trait == UndefinedPixelTrait) (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception); break; } default: break; } return(method); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have % been completed and updates the host memory. 
% % The format of the SyncAuthenticOpenCLBuffer() method is: % % void SyncAuthenticOpenCLBuffer(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info) { assert(cache_info != (CacheInfo *) NULL); assert(cache_info->signature == MagickCoreSignature); if ((cache_info->type != MemoryCache) || (cache_info->opencl == (MagickCLCacheInfo) NULL)) return; /* Ensure single threaded access to OpenCL environment. */ LockSemaphoreInfo(cache_info->semaphore); cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl); UnlockSemaphoreInfo(cache_info->semaphore); } MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); cache_info=(CacheInfo *) image->cache; CopyOpenCLBuffer(cache_info); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the % in-memory or disk cache. The method returns MagickTrue if the pixel region % is synced, otherwise MagickFalse. % % The format of the SyncAuthenticPixelCacheNexus() method is: % % MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to sync. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickBooleanType status; /* Transfer pixels to the cache. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->cache == (Cache) NULL) ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return(MagickFalse); if (image->mask_trait != UpdatePixelTrait) { if (((image->channels & WriteMaskChannel) != 0) && (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse)) return(MagickFalse); if (((image->channels & CompositeMaskChannel) != 0) && (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse)) return(MagickFalse); } if (nexus_info->authentic_pixel_cache != MagickFalse) { if (image->taint == MagickFalse) image->taint=MagickTrue; return(MagickTrue); } assert(cache_info->signature == MagickCoreSignature); status=WritePixelCachePixels(cache_info,nexus_info,exception); if ((cache_info->metacontent_extent != 0) && (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse)) return(MagickFalse); if ((status != MagickFalse) && (image->taint == MagickFalse)) image->taint=MagickTrue; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory % or disk cache. The method returns MagickTrue if the pixel region is synced, % otherwise MagickFalse. 
% % The format of the SyncAuthenticPixelsCache() method is: % % MagickBooleanType SyncAuthenticPixelsCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType SyncAuthenticPixelsCache(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncAuthenticPixels() method is: % % MagickBooleanType SyncAuthenticPixels(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SyncAuthenticPixels(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) { status=cache_info->methods.sync_authentic_pixels_handler(image, exception); return(status); } assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImagePixelCache() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncImagePixelCache() method is: % % MagickBooleanType SyncImagePixelCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(exception != (ExceptionInfo *) NULL); cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception); return(cache_info == (CacheInfo *) NULL ? 
MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + W r i t e P i x e l C a c h e M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePixelCacheMetacontent() writes the meta-content to the specified region % of the pixel cache. % % The format of the WritePixelCacheMetacontent() method is: % % MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to write the meta-content. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; const unsigned char *magick_restrict p; ssize_t y; size_t rows; if (cache_info->metacontent_extent == 0) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width* cache_info->metacontent_extent; extent=(MagickSizeType) length*nexus_info->region.height; rows=nexus_info->region.height; y=0; p=(unsigned char *) nexus_info->metacontent; switch (cache_info->type) { case MemoryCache: case MapCache: { unsigned char *magick_restrict q; /* Write associated pixels to memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } q=(unsigned char *) cache_info->metacontent+offset* cache_info->metacontent_extent; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=nexus_info->region.width*cache_info->metacontent_extent; q+=cache_info->columns*cache_info->metacontent_extent; } break; } case DiskCache: { /* Write associated pixels to disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+extent* cache_info->number_channels*sizeof(Quantum)+offset* cache_info->metacontent_extent,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->metacontent_extent*nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write metacontent to distributed cache. 
*/ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *) cache_info->server_info,&region,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->metacontent_extent*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToWritePixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + W r i t e C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePixelCachePixels() writes image pixels to the specified region of the % pixel cache. % % The format of the WritePixelCachePixels() method is: % % MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to write the pixels. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WritePixelCachePixels( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; const Quantum *magick_restrict p; ssize_t y; size_t rows; if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width* sizeof(Quantum); extent=length*nexus_info->region.height; rows=nexus_info->region.height; y=0; p=nexus_info->pixels; switch (cache_info->type) { case MemoryCache: case MapCache: { Quantum *magick_restrict q; /* Write pixels to memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } q=cache_info->pixels+cache_info->number_channels*offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->number_channels*nexus_info->region.width; q+=cache_info->number_channels*cache_info->columns; } break; } case DiskCache: { /* Write pixels to disk. 
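          Rows are written through WritePixelCacheRegion(), which uses
          pwrite() when it is available so the shared file offset is left
          undisturbed; access to the cache file is serialized with
          file_semaphore.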
*/ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+offset* cache_info->number_channels*sizeof(*p),length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->number_channels*nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write pixels to distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WriteDistributePixelCachePixels((DistributeCacheInfo *) cache_info->server_info,&region,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->number_channels*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToWritePixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); }
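/*
  Illustrative sketch (hypothetical helper, not part of ImageMagick proper): the
  region addressing shared by WritePixelCacheMetacontent() and
  WritePixelCachePixels() above.  The cache is row-major with
  cache_info->columns pixels per row, so a nexus region at (x,y) starts at
  pixel offset y*columns+x and is written one row at a time; when the region
  spans the full cache width the rows are contiguous and collapse to a single
  copy.  Assumes the surrounding ImageMagick typedefs (Quantum,
  MagickOffsetType, MagickSizeType) and <string.h>.
*/
static void CopyNexusRowsToCache(Quantum *cache_pixels,
  const Quantum *nexus_pixels,const size_t columns,const size_t number_channels,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height)
{
  MagickOffsetType
    offset;

  MagickSizeType
    length;

  size_t
    rows;

  ssize_t
    i;

  offset=(MagickOffsetType) y*columns+x;                          /* in pixels */
  length=(MagickSizeType) number_channels*width*sizeof(Quantum);  /* bytes per row */
  rows=height;
  if (columns == width)
    {
      length*=height;  /* full-width region: rows are contiguous, copy once */
      rows=1;
    }
  for (i=0; i < (ssize_t) rows; i++)
  {
    (void) memcpy(cache_pixels+number_channels*offset,nexus_pixels,
      (size_t) length);
    nexus_pixels+=number_channels*width;
    offset+=columns;
  }
}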
kmp_atomic.c
/*
 * kmp_atomic.c -- ATOMIC implementation routines
 */

//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "kmp_atomic.h"
#include "kmp.h"                  // TRUE, asm routines prototypes

typedef unsigned char uchar;
typedef unsigned short ushort;

/*!
@defgroup ATOMIC_OPS Atomic Operations
These functions are used for implementing the many different varieties of atomic operations.

The compiler is at liberty to inline atomic operations that are naturally supported
by the target architecture. For instance on IA-32 architecture an atomic like this can be inlined
@code
static int s = 0;
#pragma omp atomic
    s++;
@endcode
using the single instruction: `lock; incl s`

However the runtime does provide entrypoints for these operations to support compilers that
choose not to inline them. (For instance, `__kmpc_atomic_fixed4_add` could be used to perform
the increment above.)

The names of the functions are encoded by using the data type name and the operation name, as in these tables.

Data Type  | Data type encoding
-----------|---------------
int8_t     | `fixed1`
uint8_t    | `fixed1u`
int16_t    | `fixed2`
uint16_t   | `fixed2u`
int32_t    | `fixed4`
uint32_t   | `fixed4u`
int64_t    | `fixed8`
uint64_t   | `fixed8u`
float      | `float4`
double     | `float8`
long double (80-bit x87 float) | `float10`
complex<float>   |  `cmplx4`
complex<double>  |  `cmplx8`
complex<float10> |  `cmplx10`
<br>

Operation | Operation encoding
----------|-------------------
+  | add
-  | sub
\* | mul
/  | div
&  | andb
<< | shl
\>\> | shr
\| | orb
^  | xor
&& | andl
\|\| | orl
maximum | max
minimum | min
.eqv.   | eqv
.neqv.  | neqv
<br>

For non-commutative operations, `_rev` can also be added for the reversed operation.
For the functions that capture the result, the suffix `_cpt` is added.

Update Functions
================
The general form of an atomic function that just performs an update (without a `capture`) is
@code
void __kmpc_atomic_<datatype>_<operation>( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs );
@endcode
@param id_ref a pointer to the source location
@param gtid   the global thread id
@param lhs    a pointer to the left operand
@param rhs    the right operand

`capture` functions
===================
The capture functions perform an atomic update and return a result, which is either the value
before the capture, or that after. They take an additional argument to determine which result
is returned. Their general form is therefore
@code
TYPE __kmpc_atomic_<datatype>_<operation>_cpt( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag );
@endcode
@param id_ref a pointer to the source location
@param gtid   the global thread id
@param lhs    a pointer to the left operand
@param rhs    the right operand
@param flag   one if the result is to be captured *after* the operation, zero if captured *before*.

The one exception to this is the `complex<float>` type, where the value is not returned;
instead an extra argument pointer is passed.
They look like @code void __kmpc_atomic_cmplx4_<op>_cpt( ident_t *id_ref, int gtid, kmp_cmplx32 * lhs, kmp_cmplx32 rhs, kmp_cmplx32 * out, int flag ); @endcode Read and Write Operations ========================= The OpenMP<sup>*</sup> standard now supports atomic operations that simply ensure that the value is read or written atomically, with no modification performed. In many cases on IA-32 architecture these operations can be inlined since the architecture guarantees that no tearing occurs on aligned objects accessed with a single memory operation of up to 64 bits in size. The general form of the read operations is @code TYPE __kmpc_atomic_<type>_rd ( ident_t *id_ref, int gtid, TYPE * loc ); @endcode For the write operations the form is @code void __kmpc_atomic_<type>_wr ( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ); @endcode Full list of functions ====================== This leads to the generation of 376 atomic functions, as follows. Functons for integers --------------------- There are versions here for integers of size 1,2,4 and 8 bytes both signed and unsigned (where that matters). @code __kmpc_atomic_fixed1_add __kmpc_atomic_fixed1_add_cpt __kmpc_atomic_fixed1_add_fp __kmpc_atomic_fixed1_andb __kmpc_atomic_fixed1_andb_cpt __kmpc_atomic_fixed1_andl __kmpc_atomic_fixed1_andl_cpt __kmpc_atomic_fixed1_div __kmpc_atomic_fixed1_div_cpt __kmpc_atomic_fixed1_div_cpt_rev __kmpc_atomic_fixed1_div_float8 __kmpc_atomic_fixed1_div_fp __kmpc_atomic_fixed1_div_rev __kmpc_atomic_fixed1_eqv __kmpc_atomic_fixed1_eqv_cpt __kmpc_atomic_fixed1_max __kmpc_atomic_fixed1_max_cpt __kmpc_atomic_fixed1_min __kmpc_atomic_fixed1_min_cpt __kmpc_atomic_fixed1_mul __kmpc_atomic_fixed1_mul_cpt __kmpc_atomic_fixed1_mul_float8 __kmpc_atomic_fixed1_mul_fp __kmpc_atomic_fixed1_neqv __kmpc_atomic_fixed1_neqv_cpt __kmpc_atomic_fixed1_orb __kmpc_atomic_fixed1_orb_cpt __kmpc_atomic_fixed1_orl __kmpc_atomic_fixed1_orl_cpt __kmpc_atomic_fixed1_rd __kmpc_atomic_fixed1_shl __kmpc_atomic_fixed1_shl_cpt __kmpc_atomic_fixed1_shl_cpt_rev __kmpc_atomic_fixed1_shl_rev __kmpc_atomic_fixed1_shr __kmpc_atomic_fixed1_shr_cpt __kmpc_atomic_fixed1_shr_cpt_rev __kmpc_atomic_fixed1_shr_rev __kmpc_atomic_fixed1_sub __kmpc_atomic_fixed1_sub_cpt __kmpc_atomic_fixed1_sub_cpt_rev __kmpc_atomic_fixed1_sub_fp __kmpc_atomic_fixed1_sub_rev __kmpc_atomic_fixed1_swp __kmpc_atomic_fixed1_wr __kmpc_atomic_fixed1_xor __kmpc_atomic_fixed1_xor_cpt __kmpc_atomic_fixed1u_add_fp __kmpc_atomic_fixed1u_sub_fp __kmpc_atomic_fixed1u_mul_fp __kmpc_atomic_fixed1u_div __kmpc_atomic_fixed1u_div_cpt __kmpc_atomic_fixed1u_div_cpt_rev __kmpc_atomic_fixed1u_div_fp __kmpc_atomic_fixed1u_div_rev __kmpc_atomic_fixed1u_shr __kmpc_atomic_fixed1u_shr_cpt __kmpc_atomic_fixed1u_shr_cpt_rev __kmpc_atomic_fixed1u_shr_rev __kmpc_atomic_fixed2_add __kmpc_atomic_fixed2_add_cpt __kmpc_atomic_fixed2_add_fp __kmpc_atomic_fixed2_andb __kmpc_atomic_fixed2_andb_cpt __kmpc_atomic_fixed2_andl __kmpc_atomic_fixed2_andl_cpt __kmpc_atomic_fixed2_div __kmpc_atomic_fixed2_div_cpt __kmpc_atomic_fixed2_div_cpt_rev __kmpc_atomic_fixed2_div_float8 __kmpc_atomic_fixed2_div_fp __kmpc_atomic_fixed2_div_rev __kmpc_atomic_fixed2_eqv __kmpc_atomic_fixed2_eqv_cpt __kmpc_atomic_fixed2_max __kmpc_atomic_fixed2_max_cpt __kmpc_atomic_fixed2_min __kmpc_atomic_fixed2_min_cpt __kmpc_atomic_fixed2_mul __kmpc_atomic_fixed2_mul_cpt __kmpc_atomic_fixed2_mul_float8 __kmpc_atomic_fixed2_mul_fp __kmpc_atomic_fixed2_neqv __kmpc_atomic_fixed2_neqv_cpt __kmpc_atomic_fixed2_orb __kmpc_atomic_fixed2_orb_cpt 
__kmpc_atomic_fixed2_orl __kmpc_atomic_fixed2_orl_cpt __kmpc_atomic_fixed2_rd __kmpc_atomic_fixed2_shl __kmpc_atomic_fixed2_shl_cpt __kmpc_atomic_fixed2_shl_cpt_rev __kmpc_atomic_fixed2_shl_rev __kmpc_atomic_fixed2_shr __kmpc_atomic_fixed2_shr_cpt __kmpc_atomic_fixed2_shr_cpt_rev __kmpc_atomic_fixed2_shr_rev __kmpc_atomic_fixed2_sub __kmpc_atomic_fixed2_sub_cpt __kmpc_atomic_fixed2_sub_cpt_rev __kmpc_atomic_fixed2_sub_fp __kmpc_atomic_fixed2_sub_rev __kmpc_atomic_fixed2_swp __kmpc_atomic_fixed2_wr __kmpc_atomic_fixed2_xor __kmpc_atomic_fixed2_xor_cpt __kmpc_atomic_fixed2u_add_fp __kmpc_atomic_fixed2u_sub_fp __kmpc_atomic_fixed2u_mul_fp __kmpc_atomic_fixed2u_div __kmpc_atomic_fixed2u_div_cpt __kmpc_atomic_fixed2u_div_cpt_rev __kmpc_atomic_fixed2u_div_fp __kmpc_atomic_fixed2u_div_rev __kmpc_atomic_fixed2u_shr __kmpc_atomic_fixed2u_shr_cpt __kmpc_atomic_fixed2u_shr_cpt_rev __kmpc_atomic_fixed2u_shr_rev __kmpc_atomic_fixed4_add __kmpc_atomic_fixed4_add_cpt __kmpc_atomic_fixed4_add_fp __kmpc_atomic_fixed4_andb __kmpc_atomic_fixed4_andb_cpt __kmpc_atomic_fixed4_andl __kmpc_atomic_fixed4_andl_cpt __kmpc_atomic_fixed4_div __kmpc_atomic_fixed4_div_cpt __kmpc_atomic_fixed4_div_cpt_rev __kmpc_atomic_fixed4_div_float8 __kmpc_atomic_fixed4_div_fp __kmpc_atomic_fixed4_div_rev __kmpc_atomic_fixed4_eqv __kmpc_atomic_fixed4_eqv_cpt __kmpc_atomic_fixed4_max __kmpc_atomic_fixed4_max_cpt __kmpc_atomic_fixed4_min __kmpc_atomic_fixed4_min_cpt __kmpc_atomic_fixed4_mul __kmpc_atomic_fixed4_mul_cpt __kmpc_atomic_fixed4_mul_float8 __kmpc_atomic_fixed4_mul_fp __kmpc_atomic_fixed4_neqv __kmpc_atomic_fixed4_neqv_cpt __kmpc_atomic_fixed4_orb __kmpc_atomic_fixed4_orb_cpt __kmpc_atomic_fixed4_orl __kmpc_atomic_fixed4_orl_cpt __kmpc_atomic_fixed4_rd __kmpc_atomic_fixed4_shl __kmpc_atomic_fixed4_shl_cpt __kmpc_atomic_fixed4_shl_cpt_rev __kmpc_atomic_fixed4_shl_rev __kmpc_atomic_fixed4_shr __kmpc_atomic_fixed4_shr_cpt __kmpc_atomic_fixed4_shr_cpt_rev __kmpc_atomic_fixed4_shr_rev __kmpc_atomic_fixed4_sub __kmpc_atomic_fixed4_sub_cpt __kmpc_atomic_fixed4_sub_cpt_rev __kmpc_atomic_fixed4_sub_fp __kmpc_atomic_fixed4_sub_rev __kmpc_atomic_fixed4_swp __kmpc_atomic_fixed4_wr __kmpc_atomic_fixed4_xor __kmpc_atomic_fixed4_xor_cpt __kmpc_atomic_fixed4u_add_fp __kmpc_atomic_fixed4u_sub_fp __kmpc_atomic_fixed4u_mul_fp __kmpc_atomic_fixed4u_div __kmpc_atomic_fixed4u_div_cpt __kmpc_atomic_fixed4u_div_cpt_rev __kmpc_atomic_fixed4u_div_fp __kmpc_atomic_fixed4u_div_rev __kmpc_atomic_fixed4u_shr __kmpc_atomic_fixed4u_shr_cpt __kmpc_atomic_fixed4u_shr_cpt_rev __kmpc_atomic_fixed4u_shr_rev __kmpc_atomic_fixed8_add __kmpc_atomic_fixed8_add_cpt __kmpc_atomic_fixed8_add_fp __kmpc_atomic_fixed8_andb __kmpc_atomic_fixed8_andb_cpt __kmpc_atomic_fixed8_andl __kmpc_atomic_fixed8_andl_cpt __kmpc_atomic_fixed8_div __kmpc_atomic_fixed8_div_cpt __kmpc_atomic_fixed8_div_cpt_rev __kmpc_atomic_fixed8_div_float8 __kmpc_atomic_fixed8_div_fp __kmpc_atomic_fixed8_div_rev __kmpc_atomic_fixed8_eqv __kmpc_atomic_fixed8_eqv_cpt __kmpc_atomic_fixed8_max __kmpc_atomic_fixed8_max_cpt __kmpc_atomic_fixed8_min __kmpc_atomic_fixed8_min_cpt __kmpc_atomic_fixed8_mul __kmpc_atomic_fixed8_mul_cpt __kmpc_atomic_fixed8_mul_float8 __kmpc_atomic_fixed8_mul_fp __kmpc_atomic_fixed8_neqv __kmpc_atomic_fixed8_neqv_cpt __kmpc_atomic_fixed8_orb __kmpc_atomic_fixed8_orb_cpt __kmpc_atomic_fixed8_orl __kmpc_atomic_fixed8_orl_cpt __kmpc_atomic_fixed8_rd __kmpc_atomic_fixed8_shl __kmpc_atomic_fixed8_shl_cpt __kmpc_atomic_fixed8_shl_cpt_rev __kmpc_atomic_fixed8_shl_rev 
__kmpc_atomic_fixed8_shr __kmpc_atomic_fixed8_shr_cpt __kmpc_atomic_fixed8_shr_cpt_rev __kmpc_atomic_fixed8_shr_rev __kmpc_atomic_fixed8_sub __kmpc_atomic_fixed8_sub_cpt __kmpc_atomic_fixed8_sub_cpt_rev __kmpc_atomic_fixed8_sub_fp __kmpc_atomic_fixed8_sub_rev __kmpc_atomic_fixed8_swp __kmpc_atomic_fixed8_wr __kmpc_atomic_fixed8_xor __kmpc_atomic_fixed8_xor_cpt __kmpc_atomic_fixed8u_add_fp __kmpc_atomic_fixed8u_sub_fp __kmpc_atomic_fixed8u_mul_fp __kmpc_atomic_fixed8u_div __kmpc_atomic_fixed8u_div_cpt __kmpc_atomic_fixed8u_div_cpt_rev __kmpc_atomic_fixed8u_div_fp __kmpc_atomic_fixed8u_div_rev __kmpc_atomic_fixed8u_shr __kmpc_atomic_fixed8u_shr_cpt __kmpc_atomic_fixed8u_shr_cpt_rev __kmpc_atomic_fixed8u_shr_rev @endcode Functions for floating point ---------------------------- There are versions here for floating point numbers of size 4, 8, 10 and 16 bytes. (Ten byte floats are used by X87, but are now rare). @code __kmpc_atomic_float4_add __kmpc_atomic_float4_add_cpt __kmpc_atomic_float4_add_float8 __kmpc_atomic_float4_add_fp __kmpc_atomic_float4_div __kmpc_atomic_float4_div_cpt __kmpc_atomic_float4_div_cpt_rev __kmpc_atomic_float4_div_float8 __kmpc_atomic_float4_div_fp __kmpc_atomic_float4_div_rev __kmpc_atomic_float4_max __kmpc_atomic_float4_max_cpt __kmpc_atomic_float4_min __kmpc_atomic_float4_min_cpt __kmpc_atomic_float4_mul __kmpc_atomic_float4_mul_cpt __kmpc_atomic_float4_mul_float8 __kmpc_atomic_float4_mul_fp __kmpc_atomic_float4_rd __kmpc_atomic_float4_sub __kmpc_atomic_float4_sub_cpt __kmpc_atomic_float4_sub_cpt_rev __kmpc_atomic_float4_sub_float8 __kmpc_atomic_float4_sub_fp __kmpc_atomic_float4_sub_rev __kmpc_atomic_float4_swp __kmpc_atomic_float4_wr __kmpc_atomic_float8_add __kmpc_atomic_float8_add_cpt __kmpc_atomic_float8_add_fp __kmpc_atomic_float8_div __kmpc_atomic_float8_div_cpt __kmpc_atomic_float8_div_cpt_rev __kmpc_atomic_float8_div_fp __kmpc_atomic_float8_div_rev __kmpc_atomic_float8_max __kmpc_atomic_float8_max_cpt __kmpc_atomic_float8_min __kmpc_atomic_float8_min_cpt __kmpc_atomic_float8_mul __kmpc_atomic_float8_mul_cpt __kmpc_atomic_float8_mul_fp __kmpc_atomic_float8_rd __kmpc_atomic_float8_sub __kmpc_atomic_float8_sub_cpt __kmpc_atomic_float8_sub_cpt_rev __kmpc_atomic_float8_sub_fp __kmpc_atomic_float8_sub_rev __kmpc_atomic_float8_swp __kmpc_atomic_float8_wr __kmpc_atomic_float10_add __kmpc_atomic_float10_add_cpt __kmpc_atomic_float10_add_fp __kmpc_atomic_float10_div __kmpc_atomic_float10_div_cpt __kmpc_atomic_float10_div_cpt_rev __kmpc_atomic_float10_div_fp __kmpc_atomic_float10_div_rev __kmpc_atomic_float10_mul __kmpc_atomic_float10_mul_cpt __kmpc_atomic_float10_mul_fp __kmpc_atomic_float10_rd __kmpc_atomic_float10_sub __kmpc_atomic_float10_sub_cpt __kmpc_atomic_float10_sub_cpt_rev __kmpc_atomic_float10_sub_fp __kmpc_atomic_float10_sub_rev __kmpc_atomic_float10_swp __kmpc_atomic_float10_wr __kmpc_atomic_float16_add __kmpc_atomic_float16_add_cpt __kmpc_atomic_float16_div __kmpc_atomic_float16_div_cpt __kmpc_atomic_float16_div_cpt_rev __kmpc_atomic_float16_div_rev __kmpc_atomic_float16_max __kmpc_atomic_float16_max_cpt __kmpc_atomic_float16_min __kmpc_atomic_float16_min_cpt __kmpc_atomic_float16_mul __kmpc_atomic_float16_mul_cpt __kmpc_atomic_float16_rd __kmpc_atomic_float16_sub __kmpc_atomic_float16_sub_cpt __kmpc_atomic_float16_sub_cpt_rev __kmpc_atomic_float16_sub_rev __kmpc_atomic_float16_swp __kmpc_atomic_float16_wr @endcode Functions for Complex types --------------------------- Functions for complex types whose component floating point variables are of size 
4, 8, 10 or 16 bytes. The names here are based on the size of the component float, *not* the size
of the complex type. So `__kmpc_atomic_cmplx8_add` is an operation on a `complex<double>` or
`complex(kind=8)`, *not* `complex<float>`.

@code
__kmpc_atomic_cmplx4_add __kmpc_atomic_cmplx4_add_cmplx8 __kmpc_atomic_cmplx4_add_cpt __kmpc_atomic_cmplx4_div __kmpc_atomic_cmplx4_div_cmplx8 __kmpc_atomic_cmplx4_div_cpt __kmpc_atomic_cmplx4_div_cpt_rev __kmpc_atomic_cmplx4_div_rev __kmpc_atomic_cmplx4_mul __kmpc_atomic_cmplx4_mul_cmplx8 __kmpc_atomic_cmplx4_mul_cpt __kmpc_atomic_cmplx4_rd __kmpc_atomic_cmplx4_sub __kmpc_atomic_cmplx4_sub_cmplx8 __kmpc_atomic_cmplx4_sub_cpt __kmpc_atomic_cmplx4_sub_cpt_rev __kmpc_atomic_cmplx4_sub_rev __kmpc_atomic_cmplx4_swp __kmpc_atomic_cmplx4_wr
__kmpc_atomic_cmplx8_add __kmpc_atomic_cmplx8_add_cpt __kmpc_atomic_cmplx8_div __kmpc_atomic_cmplx8_div_cpt __kmpc_atomic_cmplx8_div_cpt_rev __kmpc_atomic_cmplx8_div_rev __kmpc_atomic_cmplx8_mul __kmpc_atomic_cmplx8_mul_cpt __kmpc_atomic_cmplx8_rd __kmpc_atomic_cmplx8_sub __kmpc_atomic_cmplx8_sub_cpt __kmpc_atomic_cmplx8_sub_cpt_rev __kmpc_atomic_cmplx8_sub_rev __kmpc_atomic_cmplx8_swp __kmpc_atomic_cmplx8_wr
__kmpc_atomic_cmplx10_add __kmpc_atomic_cmplx10_add_cpt __kmpc_atomic_cmplx10_div __kmpc_atomic_cmplx10_div_cpt __kmpc_atomic_cmplx10_div_cpt_rev __kmpc_atomic_cmplx10_div_rev __kmpc_atomic_cmplx10_mul __kmpc_atomic_cmplx10_mul_cpt __kmpc_atomic_cmplx10_rd __kmpc_atomic_cmplx10_sub __kmpc_atomic_cmplx10_sub_cpt __kmpc_atomic_cmplx10_sub_cpt_rev __kmpc_atomic_cmplx10_sub_rev __kmpc_atomic_cmplx10_swp __kmpc_atomic_cmplx10_wr
__kmpc_atomic_cmplx16_add __kmpc_atomic_cmplx16_add_cpt __kmpc_atomic_cmplx16_div __kmpc_atomic_cmplx16_div_cpt __kmpc_atomic_cmplx16_div_cpt_rev __kmpc_atomic_cmplx16_div_rev __kmpc_atomic_cmplx16_mul __kmpc_atomic_cmplx16_mul_cpt __kmpc_atomic_cmplx16_rd __kmpc_atomic_cmplx16_sub __kmpc_atomic_cmplx16_sub_cpt __kmpc_atomic_cmplx16_sub_cpt_rev __kmpc_atomic_cmplx16_swp __kmpc_atomic_cmplx16_wr
@endcode
*/
/*!
@ingroup ATOMIC_OPS @{ */ /* * Global vars */ #ifndef KMP_GOMP_COMPAT int __kmp_atomic_mode = 1; // Intel perf #else int __kmp_atomic_mode = 2; // GOMP compatibility #endif /* KMP_GOMP_COMPAT */ KMP_ALIGN(128) kmp_atomic_lock_t __kmp_atomic_lock; /* Control access to all user coded atomics in Gnu compat mode */ kmp_atomic_lock_t __kmp_atomic_lock_1i; /* Control access to all user coded atomics for 1-byte fixed data types */ kmp_atomic_lock_t __kmp_atomic_lock_2i; /* Control access to all user coded atomics for 2-byte fixed data types */ kmp_atomic_lock_t __kmp_atomic_lock_4i; /* Control access to all user coded atomics for 4-byte fixed data types */ kmp_atomic_lock_t __kmp_atomic_lock_4r; /* Control access to all user coded atomics for kmp_real32 data type */ kmp_atomic_lock_t __kmp_atomic_lock_8i; /* Control access to all user coded atomics for 8-byte fixed data types */ kmp_atomic_lock_t __kmp_atomic_lock_8r; /* Control access to all user coded atomics for kmp_real64 data type */ kmp_atomic_lock_t __kmp_atomic_lock_8c; /* Control access to all user coded atomics for complex byte data type */ kmp_atomic_lock_t __kmp_atomic_lock_10r; /* Control access to all user coded atomics for long double data type */ kmp_atomic_lock_t __kmp_atomic_lock_16r; /* Control access to all user coded atomics for _Quad data type */ kmp_atomic_lock_t __kmp_atomic_lock_16c; /* Control access to all user coded atomics for double complex data type*/ kmp_atomic_lock_t __kmp_atomic_lock_20c; /* Control access to all user coded atomics for long double complex type*/ kmp_atomic_lock_t __kmp_atomic_lock_32c; /* Control access to all user coded atomics for _Quad complex data type */ /* 2007-03-02: Without "volatile" specifier in OP_CMPXCHG and MIN_MAX_CMPXCHG we have a bug on *_32 and *_32e. This is just a temporary workaround for the problem. It seems the right solution is writing OP_CMPXCHG and MIN_MAX_CMPXCHG routines in assembler language. 
*/ #define KMP_ATOMIC_VOLATILE volatile #if ( KMP_ARCH_X86 ) && KMP_HAVE_QUAD static inline void operator +=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q += rhs.q; }; static inline void operator -=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q -= rhs.q; }; static inline void operator *=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q *= rhs.q; }; static inline void operator /=( Quad_a4_t & lhs, Quad_a4_t & rhs ) { lhs.q /= rhs.q; }; static inline bool operator < ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q < rhs.q; } static inline bool operator > ( Quad_a4_t & lhs, Quad_a4_t & rhs ) { return lhs.q > rhs.q; } static inline void operator +=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q += rhs.q; }; static inline void operator -=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q -= rhs.q; }; static inline void operator *=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q *= rhs.q; }; static inline void operator /=( Quad_a16_t & lhs, Quad_a16_t & rhs ) { lhs.q /= rhs.q; }; static inline bool operator < ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q < rhs.q; } static inline bool operator > ( Quad_a16_t & lhs, Quad_a16_t & rhs ) { return lhs.q > rhs.q; } static inline void operator +=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q += rhs.q; }; static inline void operator -=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q -= rhs.q; }; static inline void operator *=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q *= rhs.q; }; static inline void operator /=( kmp_cmplx128_a4_t & lhs, kmp_cmplx128_a4_t & rhs ) { lhs.q /= rhs.q; }; static inline void operator +=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q += rhs.q; }; static inline void operator -=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q -= rhs.q; }; static inline void operator *=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q *= rhs.q; }; static inline void operator /=( kmp_cmplx128_a16_t & lhs, kmp_cmplx128_a16_t & rhs ) { lhs.q /= rhs.q; }; #endif /* ------------------------------------------------------------------------ */ /* ATOMIC implementation routines */ /* one routine for each operation and operand type */ /* ------------------------------------------------------------------------ */ // All routines declarations looks like // void __kmpc_atomic_RTYPE_OP( ident_t*, int, TYPE *lhs, TYPE rhs ); // ------------------------------------------------------------------------ #define KMP_CHECK_GTID \ if ( gtid == KMP_GTID_UNKNOWN ) { \ gtid = __kmp_entry_gtid(); \ } // check and get gtid when needed // Beginning of a definition (provides name, parameters, gebug trace) // TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) // OP_ID - operation identifier (add, sub, mul, ...) 
// TYPE - operands' type
#define ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE, RET_TYPE) \
RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \
{ \
    KMP_DEBUG_ASSERT( __kmp_init_serial ); \
    KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid ));

// ------------------------------------------------------------------------
// Lock variables used for critical sections for various size operands
#define ATOMIC_LOCK0   __kmp_atomic_lock       // all types, for Gnu compat
#define ATOMIC_LOCK1i  __kmp_atomic_lock_1i    // char
#define ATOMIC_LOCK2i  __kmp_atomic_lock_2i    // short
#define ATOMIC_LOCK4i  __kmp_atomic_lock_4i    // long int
#define ATOMIC_LOCK4r  __kmp_atomic_lock_4r    // float
#define ATOMIC_LOCK8i  __kmp_atomic_lock_8i    // long long int
#define ATOMIC_LOCK8r  __kmp_atomic_lock_8r    // double
#define ATOMIC_LOCK8c  __kmp_atomic_lock_8c    // float complex
#define ATOMIC_LOCK10r __kmp_atomic_lock_10r   // long double
#define ATOMIC_LOCK16r __kmp_atomic_lock_16r   // _Quad
#define ATOMIC_LOCK16c __kmp_atomic_lock_16c   // double complex
#define ATOMIC_LOCK20c __kmp_atomic_lock_20c   // long double complex
#define ATOMIC_LOCK32c __kmp_atomic_lock_32c   // _Quad complex

// ------------------------------------------------------------------------
// Operation on *lhs, rhs bound by critical section
//     OP     - operator (it's supposed to contain an assignment)
//     LCK_ID - lock identifier
// Note: don't check gtid as it should always be valid
// 1, 2-byte - expect valid parameter, other - check before this macro
#define OP_CRITICAL(OP,LCK_ID) \
    __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \
    \
    (*lhs) OP (rhs); \
    \
    __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid );

// ------------------------------------------------------------------------
// For GNU compatibility, we may need to use a critical section,
// even though it is not required by the ISA.
//
// On IA-32 architecture, all atomic operations except for fixed 4 byte add,
// sub, and bitwise logical ops, and 1 & 2 byte logical ops use a common
// critical section. On Intel(R) 64, all atomic operations are done with fetch
// and add or compare and exchange. Therefore, the FLAG parameter to this
// macro is either KMP_ARCH_X86 or 0 (or 1, for Intel-specific extensions which
// require a critical section, where we predict that they will be implemented
// in the Gnu codegen by calling GOMP_atomic_start() / GOMP_atomic_end()).
//
// When the OP_GOMP_CRITICAL macro is used in a *CRITICAL* macro construct,
// the FLAG parameter should always be 1. If we know that we will be using
// a critical section, then we want to make certain that we use the generic
// lock __kmp_atomic_lock to protect the atomic update, and not one of the
// locks that are specialized based upon the size or type of the data.
//
// If FLAG is 0, then we are relying on dead code elimination by the build
// compiler to get rid of the useless block of code, and save a needless
// branch at runtime.
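//
// For illustration (a sketch of the macro expansion, not text generated by the
// preprocessor verbatim): in GOMP compatibility mode (__kmp_atomic_mode == 2),
// an instantiation that passes a non-zero GOMP_FLAG expands
// OP_GOMP_CRITICAL(+=,1) at the top of the routine to roughly:
//
//     if ( (1) && (__kmp_atomic_mode == 2) ) {
//         if ( gtid == KMP_GTID_UNKNOWN ) { gtid = __kmp_entry_gtid(); }  // KMP_CHECK_GTID
//         __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid );         // generic lock (ATOMIC_LOCK0)
//         (*lhs) += (rhs);
//         __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid );
//         return;
//     }
//
// When __kmp_atomic_mode != 2 the branch is not taken and the routine falls
// through to the lock-free (cmpxchg / fetch-and-add) path below.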
// #ifdef KMP_GOMP_COMPAT # define OP_GOMP_CRITICAL(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL( OP, 0 ); \ return; \ } # else # define OP_GOMP_CRITICAL(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ #if KMP_MIC # define KMP_DO_PAUSE _mm_delay_32( 1 ) #else # define KMP_DO_PAUSE KMP_CPU_PAUSE() #endif /* KMP_MIC */ // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator #define OP_CMPXCHG(TYPE,BITS,OP) \ { \ TYPE old_value, new_value; \ old_value = *(TYPE volatile *)lhs; \ new_value = old_value OP rhs; \ while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_DO_PAUSE; \ \ old_value = *(TYPE volatile *)lhs; \ new_value = old_value OP rhs; \ } \ } #if USE_CMPXCHG_FIX // 2007-06-25: // workaround for C78287 (complex(kind=4) data type) // lin_32, lin_32e, win_32 and win_32e are affected (I verified the asm) // Compiler ignores the volatile qualifier of the temp_val in the OP_CMPXCHG macro. // This is a problem of the compiler. // Related tracker is C76005, targeted to 11.0. // I verified the asm of the workaround. #define OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \ { \ struct _sss { \ TYPE cmp; \ kmp_int##BITS *vvv; \ }; \ struct _sss old_value, new_value; \ old_value.vvv = ( kmp_int##BITS * )&old_value.cmp; \ new_value.vvv = ( kmp_int##BITS * )&new_value.cmp; \ *old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \ new_value.cmp = old_value.cmp OP rhs; \ while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) old_value.vvv, \ *VOLATILE_CAST(kmp_int##BITS *) new_value.vvv ) ) \ { \ KMP_DO_PAUSE; \ \ *old_value.vvv = * ( volatile kmp_int##BITS * ) lhs; \ new_value.cmp = old_value.cmp OP rhs; \ } \ } // end of the first part of the workaround for C78287 #endif // USE_CMPXCHG_FIX #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment problems ==================================== #define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \ KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG(TYPE,BITS,OP) \ } #if USE_CMPXCHG_FIX // ------------------------------------------------------------------------- // workaround for C78287 (complex(kind=4) data type) #define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \ } // end of the second part of the workaround for C78287 #endif #else // ------------------------------------------------------------------------- // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_FIXED_ADD(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \ KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } #if USE_CMPXCHG_FIX // ------------------------------------------------------------------------- // workaround for C78287 (complex(kind=4) data type) #define ATOMIC_CMPXCHG_WORKAROUND(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } // end of the second part of the workaround for C78287 #endif // USE_CMPXCHG_FIX #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ // Routines for ATOMIC 4-byte operands addition and subtraction ATOMIC_FIXED_ADD( fixed4, add, kmp_int32, 32, +, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add ATOMIC_FIXED_ADD( fixed4, sub, kmp_int32, 32, -, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub ATOMIC_CMPXCHG( float4, add, kmp_real32, 32, +, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add ATOMIC_CMPXCHG( float4, sub, kmp_real32, 32, -, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub // Routines for ATOMIC 8-byte operands addition and subtraction ATOMIC_FIXED_ADD( fixed8, add, kmp_int64, 64, +, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add ATOMIC_FIXED_ADD( fixed8, sub, kmp_int64, 64, -, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub ATOMIC_CMPXCHG( float8, add, kmp_real64, 64, +, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add ATOMIC_CMPXCHG( float8, sub, kmp_real64, 64, -, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub // ------------------------------------------------------------------------ // Entries definition for integer operands // TYPE_ID - operands type and size (fixed4, float4) // OP_ID - operation identifier (add, sub, mul, ...) 
// TYPE - operand type // BITS - size in bits, used to distinguish low level calls // OP - operator (used in critical section) // LCK_ID - lock identifier, used to possibly distinguish lock variable // MASK - used for alignment check // TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,MASK,GOMP_FLAG // ------------------------------------------------------------------------ // Routines for ATOMIC integer operands, other operators // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG ATOMIC_CMPXCHG( fixed1, add, kmp_int8, 8, +, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add ATOMIC_CMPXCHG( fixed1, andb, kmp_int8, 8, &, 1i, 0, 0 ) // __kmpc_atomic_fixed1_andb ATOMIC_CMPXCHG( fixed1, div, kmp_int8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div ATOMIC_CMPXCHG( fixed1u, div, kmp_uint8, 8, /, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div ATOMIC_CMPXCHG( fixed1, mul, kmp_int8, 8, *, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul ATOMIC_CMPXCHG( fixed1, orb, kmp_int8, 8, |, 1i, 0, 0 ) // __kmpc_atomic_fixed1_orb ATOMIC_CMPXCHG( fixed1, shl, kmp_int8, 8, <<, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl ATOMIC_CMPXCHG( fixed1, shr, kmp_int8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr ATOMIC_CMPXCHG( fixed1u, shr, kmp_uint8, 8, >>, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr ATOMIC_CMPXCHG( fixed1, sub, kmp_int8, 8, -, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub ATOMIC_CMPXCHG( fixed1, xor, kmp_int8, 8, ^, 1i, 0, 0 ) // __kmpc_atomic_fixed1_xor ATOMIC_CMPXCHG( fixed2, add, kmp_int16, 16, +, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add ATOMIC_CMPXCHG( fixed2, andb, kmp_int16, 16, &, 2i, 1, 0 ) // __kmpc_atomic_fixed2_andb ATOMIC_CMPXCHG( fixed2, div, kmp_int16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div ATOMIC_CMPXCHG( fixed2u, div, kmp_uint16, 16, /, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div ATOMIC_CMPXCHG( fixed2, mul, kmp_int16, 16, *, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul ATOMIC_CMPXCHG( fixed2, orb, kmp_int16, 16, |, 2i, 1, 0 ) // __kmpc_atomic_fixed2_orb ATOMIC_CMPXCHG( fixed2, shl, kmp_int16, 16, <<, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl ATOMIC_CMPXCHG( fixed2, shr, kmp_int16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr ATOMIC_CMPXCHG( fixed2u, shr, kmp_uint16, 16, >>, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr ATOMIC_CMPXCHG( fixed2, sub, kmp_int16, 16, -, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub ATOMIC_CMPXCHG( fixed2, xor, kmp_int16, 16, ^, 2i, 1, 0 ) // __kmpc_atomic_fixed2_xor ATOMIC_CMPXCHG( fixed4, andb, kmp_int32, 32, &, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andb ATOMIC_CMPXCHG( fixed4, div, kmp_int32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div ATOMIC_CMPXCHG( fixed4u, div, kmp_uint32, 32, /, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div ATOMIC_CMPXCHG( fixed4, mul, kmp_int32, 32, *, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul ATOMIC_CMPXCHG( fixed4, orb, kmp_int32, 32, |, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orb ATOMIC_CMPXCHG( fixed4, shl, kmp_int32, 32, <<, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl ATOMIC_CMPXCHG( fixed4, shr, kmp_int32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr ATOMIC_CMPXCHG( fixed4u, shr, kmp_uint32, 32, >>, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr ATOMIC_CMPXCHG( fixed4, xor, kmp_int32, 32, ^, 4i, 3, 0 ) // __kmpc_atomic_fixed4_xor ATOMIC_CMPXCHG( fixed8, andb, kmp_int64, 64, &, 8i, 7, KMP_ARCH_X86 ) // 
__kmpc_atomic_fixed8_andb ATOMIC_CMPXCHG( fixed8, div, kmp_int64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div ATOMIC_CMPXCHG( fixed8u, div, kmp_uint64, 64, /, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div ATOMIC_CMPXCHG( fixed8, mul, kmp_int64, 64, *, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul ATOMIC_CMPXCHG( fixed8, orb, kmp_int64, 64, |, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb ATOMIC_CMPXCHG( fixed8, shl, kmp_int64, 64, <<, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl ATOMIC_CMPXCHG( fixed8, shr, kmp_int64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr ATOMIC_CMPXCHG( fixed8u, shr, kmp_uint64, 64, >>, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr ATOMIC_CMPXCHG( fixed8, xor, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor ATOMIC_CMPXCHG( float4, div, kmp_real32, 32, /, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div ATOMIC_CMPXCHG( float4, mul, kmp_real32, 32, *, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul ATOMIC_CMPXCHG( float8, div, kmp_real64, 64, /, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div ATOMIC_CMPXCHG( float8, mul, kmp_real64, 64, *, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul // TYPE_ID,OP_ID, TYPE, OP, LCK_ID, GOMP_FLAG /* ------------------------------------------------------------------------ */ /* Routines for C/C++ Reduction operators && and || */ /* ------------------------------------------------------------------------ */ // ------------------------------------------------------------------------ // Need separate macros for &&, || because there is no combined assignment // TODO: eliminate ATOMIC_CRIT_{L,EQV} macros as not used #define ATOMIC_CRIT_L(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \ OP_CRITICAL( = *lhs OP, LCK_ID ) \ } #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment problems =================================== #define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL( = *lhs OP, GOMP_FLAG ) \ OP_CMPXCHG(TYPE,BITS,OP) \ } #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_CMPX_L(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(= *lhs OP,GOMP_FLAG) \ if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(= *lhs OP,LCK_ID) /* unaligned - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ ATOMIC_CMPX_L( fixed1, andl, char, 8, &&, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl ATOMIC_CMPX_L( fixed1, orl, char, 8, ||, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl ATOMIC_CMPX_L( fixed2, andl, short, 16, &&, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl ATOMIC_CMPX_L( fixed2, orl, short, 16, ||, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl ATOMIC_CMPX_L( fixed4, andl, kmp_int32, 32, &&, 4i, 3, 0 ) // __kmpc_atomic_fixed4_andl ATOMIC_CMPX_L( fixed4, orl, kmp_int32, 32, ||, 4i, 3, 0 ) // __kmpc_atomic_fixed4_orl ATOMIC_CMPX_L( fixed8, andl, kmp_int64, 64, &&, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl ATOMIC_CMPX_L( fixed8, orl, kmp_int64, 64, ||, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orl /* ------------------------------------------------------------------------- */ /* Routines for Fortran operators that matched no one in C: */ /* MAX, MIN, .EQV., .NEQV. */ /* Operators .AND., .OR. are covered by __kmpc_atomic_*_{andl,orl} */ /* Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor} */ /* ------------------------------------------------------------------------- */ // ------------------------------------------------------------------------- // MIN and MAX need separate macros // OP - operator to check if we need any actions? #define MIN_MAX_CRITSECT(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if ( *lhs OP rhs ) { /* still need actions? */ \ *lhs = rhs; \ } \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------- #ifdef KMP_GOMP_COMPAT #define GOMP_MIN_MAX_CRITSECT(OP,FLAG) \ if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \ KMP_CHECK_GTID; \ MIN_MAX_CRITSECT( OP, 0 ); \ return; \ } #else #define GOMP_MIN_MAX_CRITSECT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------- #define MIN_MAX_CMPXCHG(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value; \ temp_val = *lhs; \ old_value = temp_val; \ while ( old_value OP rhs && /* still need actions? */ \ ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \ { \ KMP_CPU_PAUSE(); \ temp_val = *lhs; \ old_value = temp_val; \ } \ } // ------------------------------------------------------------------------- // 1-byte, 2-byte operands - use critical section #define MIN_MAX_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ if ( *lhs OP rhs ) { /* need actions? */ \ GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ MIN_MAX_CRITSECT(OP,LCK_ID) \ } \ } #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------- // X86 or X86_64: no alignment problems ==================================== #define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ if ( *lhs OP rhs ) { \ GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ MIN_MAX_CMPXCHG(TYPE,BITS,OP) \ } \ } #else // ------------------------------------------------------------------------- // Code for other architectures that don't handle unaligned accesses. 
#define MIN_MAX_COMPXCHG(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ if ( *lhs OP rhs ) { \ GOMP_MIN_MAX_CRITSECT(OP,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ MIN_MAX_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ MIN_MAX_CRITSECT(OP,LCK_ID) /* unaligned address */ \ } \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ MIN_MAX_COMPXCHG( fixed1, max, char, 8, <, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max MIN_MAX_COMPXCHG( fixed1, min, char, 8, >, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min MIN_MAX_COMPXCHG( fixed2, max, short, 16, <, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max MIN_MAX_COMPXCHG( fixed2, min, short, 16, >, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min MIN_MAX_COMPXCHG( fixed4, max, kmp_int32, 32, <, 4i, 3, 0 ) // __kmpc_atomic_fixed4_max MIN_MAX_COMPXCHG( fixed4, min, kmp_int32, 32, >, 4i, 3, 0 ) // __kmpc_atomic_fixed4_min MIN_MAX_COMPXCHG( fixed8, max, kmp_int64, 64, <, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max MIN_MAX_COMPXCHG( fixed8, min, kmp_int64, 64, >, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min MIN_MAX_COMPXCHG( float4, max, kmp_real32, 32, <, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max MIN_MAX_COMPXCHG( float4, min, kmp_real32, 32, >, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_min MIN_MAX_COMPXCHG( float8, max, kmp_real64, 64, <, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_max MIN_MAX_COMPXCHG( float8, min, kmp_real64, 64, >, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_min #if KMP_HAVE_QUAD MIN_MAX_CRITICAL( float16, max, QUAD_LEGACY, <, 16r, 1 ) // __kmpc_atomic_float16_max MIN_MAX_CRITICAL( float16, min, QUAD_LEGACY, >, 16r, 1 ) // __kmpc_atomic_float16_min #if ( KMP_ARCH_X86 ) MIN_MAX_CRITICAL( float16, max_a16, Quad_a16_t, <, 16r, 1 ) // __kmpc_atomic_float16_max_a16 MIN_MAX_CRITICAL( float16, min_a16, Quad_a16_t, >, 16r, 1 ) // __kmpc_atomic_float16_min_a16 #endif #endif // ------------------------------------------------------------------------ // Need separate macros for .EQV. because of the need of complement (~) // OP ignored for critical sections, ^=~ used instead #define ATOMIC_CRIT_EQV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL(^=~,LCK_ID) /* send assignment and complement */ \ } // ------------------------------------------------------------------------ #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment problems =================================== #define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(^=~,GOMP_FLAG) /* send assignment */ \ OP_CMPXCHG(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------ #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_CMPX_EQV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(^=~,GOMP_FLAG) \ if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(^=~,LCK_ID) /* unaligned address - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ ATOMIC_CMPXCHG( fixed1, neqv, kmp_int8, 8, ^, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv ATOMIC_CMPXCHG( fixed2, neqv, kmp_int16, 16, ^, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv ATOMIC_CMPXCHG( fixed4, neqv, kmp_int32, 32, ^, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv ATOMIC_CMPXCHG( fixed8, neqv, kmp_int64, 64, ^, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv ATOMIC_CMPX_EQV( fixed1, eqv, kmp_int8, 8, ^~, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv ATOMIC_CMPX_EQV( fixed2, eqv, kmp_int16, 16, ^~, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv ATOMIC_CMPX_EQV( fixed4, eqv, kmp_int32, 32, ^~, 4i, 3, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv ATOMIC_CMPX_EQV( fixed8, eqv, kmp_int64, 64, ^~, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_eqv // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL(OP##=,LCK_ID) /* send assignment */ \ } /* ------------------------------------------------------------------------- */ // routines for long double type ATOMIC_CRITICAL( float10, add, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add ATOMIC_CRITICAL( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub ATOMIC_CRITICAL( float10, mul, long double, *, 10r, 1 ) // __kmpc_atomic_float10_mul ATOMIC_CRITICAL( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div #if KMP_HAVE_QUAD // routines for _Quad type ATOMIC_CRITICAL( float16, add, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add ATOMIC_CRITICAL( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub ATOMIC_CRITICAL( float16, mul, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul ATOMIC_CRITICAL( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL( float16, add_a16, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16 ATOMIC_CRITICAL( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16 ATOMIC_CRITICAL( float16, mul_a16, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16 ATOMIC_CRITICAL( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16 #endif #endif // routines for complex types #if USE_CMPXCHG_FIX // workaround for C78287 (complex(kind=4) data type) ATOMIC_CMPXCHG_WORKAROUND( cmplx4, add, kmp_cmplx32, 64, +, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_add ATOMIC_CMPXCHG_WORKAROUND( cmplx4, sub, kmp_cmplx32, 64, -, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_sub ATOMIC_CMPXCHG_WORKAROUND( cmplx4, mul, kmp_cmplx32, 64, *, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_mul ATOMIC_CMPXCHG_WORKAROUND( cmplx4, div, kmp_cmplx32, 64, /, 8c, 7, 1 ) // __kmpc_atomic_cmplx4_div // end of the workaround for C78287 #else ATOMIC_CRITICAL( cmplx4, add, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_add ATOMIC_CRITICAL( cmplx4, sub, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub ATOMIC_CRITICAL( cmplx4, mul, kmp_cmplx32, *, 8c, 1 ) // 
__kmpc_atomic_cmplx4_mul ATOMIC_CRITICAL( cmplx4, div, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div #endif // USE_CMPXCHG_FIX ATOMIC_CRITICAL( cmplx8, add, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_add ATOMIC_CRITICAL( cmplx8, sub, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub ATOMIC_CRITICAL( cmplx8, mul, kmp_cmplx64, *, 16c, 1 ) // __kmpc_atomic_cmplx8_mul ATOMIC_CRITICAL( cmplx8, div, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div ATOMIC_CRITICAL( cmplx10, add, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_add ATOMIC_CRITICAL( cmplx10, sub, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub ATOMIC_CRITICAL( cmplx10, mul, kmp_cmplx80, *, 20c, 1 ) // __kmpc_atomic_cmplx10_mul ATOMIC_CRITICAL( cmplx10, div, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div #if KMP_HAVE_QUAD ATOMIC_CRITICAL( cmplx16, add, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add ATOMIC_CRITICAL( cmplx16, sub, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub ATOMIC_CRITICAL( cmplx16, mul, CPLX128_LEG, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul ATOMIC_CRITICAL( cmplx16, div, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL( cmplx16, add_a16, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_a16 ATOMIC_CRITICAL( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16 ATOMIC_CRITICAL( cmplx16, mul_a16, kmp_cmplx128_a16_t, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_a16 ATOMIC_CRITICAL( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16 #endif #endif #if OMP_40_ENABLED // OpenMP 4.0: x = expr binop x for non-commutative operations. // Supported only on IA-32 architecture and Intel(R) 64 #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL_REV(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ (*lhs) = (rhs) OP (*lhs); \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_REV(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_REV( OP, 0 ); \ return; \ } #else #define OP_GOMP_CRITICAL_REV(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // Beginning of a definition (provides name, parameters, gebug trace) // TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) // OP_ID - operation identifier (add, sub, mul, ...) 
// TYPE - operands' type #define ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE, RET_TYPE) \ RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID##_rev( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_rev: T#%d\n", gtid )); // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator // Note: temp_val introduced in order to force the compiler to read // *lhs only once (w/o it the compiler reads *lhs twice) #define OP_CMPXCHG_REV(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value, new_value; \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs OP old_value; \ while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_DO_PAUSE; \ \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs OP old_value; \ } \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \ OP_CMPXCHG_REV(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------ // Entries definition for integer operands // TYPE_ID - operands type and size (fixed4, float4) // OP_ID - operation identifier (add, sub, mul, ...) // TYPE - operand type // BITS - size in bits, used to distinguish low level calls // OP - operator (used in critical section) // LCK_ID - lock identifier, used to possibly distinguish lock variable // TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID,GOMP_FLAG // ------------------------------------------------------------------------ // Routines for ATOMIC integer operands, other operators // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, BITS, OP, LCK_ID, GOMP_FLAG ATOMIC_CMPXCHG_REV( fixed1, div, kmp_int8, 8, /, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_rev ATOMIC_CMPXCHG_REV( fixed1u, div, kmp_uint8, 8, /, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_rev ATOMIC_CMPXCHG_REV( fixed1, shl, kmp_int8, 8, <<, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_rev ATOMIC_CMPXCHG_REV( fixed1, shr, kmp_int8, 8, >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_rev ATOMIC_CMPXCHG_REV( fixed1u, shr, kmp_uint8, 8, >>, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_rev ATOMIC_CMPXCHG_REV( fixed1, sub, kmp_int8, 8, -, 1i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_rev ATOMIC_CMPXCHG_REV( fixed2, div, kmp_int16, 16, /, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_rev ATOMIC_CMPXCHG_REV( fixed2u, div, kmp_uint16, 16, /, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_rev ATOMIC_CMPXCHG_REV( fixed2, shl, kmp_int16, 16, <<, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_rev ATOMIC_CMPXCHG_REV( fixed2, shr, kmp_int16, 16, >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_rev ATOMIC_CMPXCHG_REV( fixed2u, shr, kmp_uint16, 16, >>, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_rev ATOMIC_CMPXCHG_REV( fixed2, sub, kmp_int16, 16, -, 2i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_rev ATOMIC_CMPXCHG_REV( fixed4, div, kmp_int32, 32, /, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_rev ATOMIC_CMPXCHG_REV( fixed4u, div, kmp_uint32, 32, /, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_rev 
ATOMIC_CMPXCHG_REV( fixed4, shl, kmp_int32, 32, <<, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_rev ATOMIC_CMPXCHG_REV( fixed4, shr, kmp_int32, 32, >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_rev ATOMIC_CMPXCHG_REV( fixed4u, shr, kmp_uint32, 32, >>, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_rev ATOMIC_CMPXCHG_REV( fixed4, sub, kmp_int32, 32, -, 4i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_rev ATOMIC_CMPXCHG_REV( fixed8, div, kmp_int64, 64, /, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_rev ATOMIC_CMPXCHG_REV( fixed8u, div, kmp_uint64, 64, /, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_rev ATOMIC_CMPXCHG_REV( fixed8, shl, kmp_int64, 64, <<, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_rev ATOMIC_CMPXCHG_REV( fixed8, shr, kmp_int64, 64, >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_rev ATOMIC_CMPXCHG_REV( fixed8u, shr, kmp_uint64, 64, >>, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_rev ATOMIC_CMPXCHG_REV( fixed8, sub, kmp_int64, 64, -, 8i, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_rev ATOMIC_CMPXCHG_REV( float4, div, kmp_real32, 32, /, 4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_rev ATOMIC_CMPXCHG_REV( float4, sub, kmp_real32, 32, -, 4r, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_rev ATOMIC_CMPXCHG_REV( float8, div, kmp_real64, 64, /, 8r, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_rev ATOMIC_CMPXCHG_REV( float8, sub, kmp_real64, 64, -, 8r, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_rev // TYPE_ID,OP_ID, TYPE, BITS,OP,LCK_ID, GOMP_FLAG // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_REV(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \ OP_CRITICAL_REV(OP,LCK_ID) \ } /* ------------------------------------------------------------------------- */ // routines for long double type ATOMIC_CRITICAL_REV( float10, sub, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_rev ATOMIC_CRITICAL_REV( float10, div, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_rev #if KMP_HAVE_QUAD // routines for _Quad type ATOMIC_CRITICAL_REV( float16, sub, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_rev ATOMIC_CRITICAL_REV( float16, div, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_rev #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_REV( float16, sub_a16, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_rev ATOMIC_CRITICAL_REV( float16, div_a16, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_rev #endif #endif // routines for complex types ATOMIC_CRITICAL_REV( cmplx4, sub, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_rev ATOMIC_CRITICAL_REV( cmplx4, div, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_rev ATOMIC_CRITICAL_REV( cmplx8, sub, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_rev ATOMIC_CRITICAL_REV( cmplx8, div, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_rev ATOMIC_CRITICAL_REV( cmplx10, sub, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_rev ATOMIC_CRITICAL_REV( cmplx10, div, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_rev #if KMP_HAVE_QUAD ATOMIC_CRITICAL_REV( cmplx16, sub, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_rev ATOMIC_CRITICAL_REV( cmplx16, div, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_rev #if ( KMP_ARCH_X86 ) 
ATOMIC_CRITICAL_REV( cmplx16, sub_a16, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_rev ATOMIC_CRITICAL_REV( cmplx16, div_a16, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_rev #endif #endif #endif //KMP_ARCH_X86 || KMP_ARCH_X86_64 // End of OpenMP 4.0: x = expr binop x for non-commutative operations. #endif //OMP_40_ENABLED /* ------------------------------------------------------------------------ */ /* Routines for mixed types of LHS and RHS, when RHS is "larger" */ /* Note: in order to reduce the total number of types combinations */ /* it is supposed that compiler converts RHS to longest floating type,*/ /* that is _Quad, before call to any of these routines */ /* Conversion to _Quad will be done by the compiler during calculation, */ /* conversion back to TYPE - before the assignment, like: */ /* *lhs = (TYPE)( (_Quad)(*lhs) OP rhs ) */ /* Performance penalty expected because of SW emulation use */ /* ------------------------------------------------------------------------ */ #define ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ void __kmpc_atomic_##TYPE_ID##_##OP_ID##_##RTYPE_ID( ident_t *id_ref, int gtid, TYPE * lhs, RTYPE rhs ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_" #RTYPE_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------- #define ATOMIC_CRITICAL_FP(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL(OP##=,LCK_ID) /* send assignment */ \ } // ------------------------------------------------------------------------- #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------- // X86 or X86_64: no alignment problems ==================================== #define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------- #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_CMPXCHG_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! 
( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ // ------------------------------------------------------------------------- #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_REV_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \ OP_CMPXCHG_REV(TYPE,BITS,OP) \ } #define ATOMIC_CRITICAL_REV_FP(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL_REV(OP,GOMP_FLAG) \ OP_CRITICAL_REV(OP,LCK_ID) \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ // RHS=float8 ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_float8 ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, float8, kmp_real64, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_float8 ATOMIC_CMPXCHG_MIX( fixed2, short, mul, 16, *, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_float8 ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, float8, kmp_real64, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_float8 ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_float8 ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, float8, kmp_real64, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_float8 ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_float8 ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, float8, kmp_real64, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_float8 ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, float8, kmp_real64, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_float8 // RHS=float16 (deprecated, to be removed when we are sure the compiler does not use them) #if KMP_HAVE_QUAD ATOMIC_CMPXCHG_MIX( fixed1, char, add, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_fp ATOMIC_CMPXCHG_MIX( fixed1u, uchar, add, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_add_fp ATOMIC_CMPXCHG_MIX( fixed1, char, sub, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_fp ATOMIC_CMPXCHG_MIX( fixed1u, uchar, sub, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_sub_fp ATOMIC_CMPXCHG_MIX( fixed1, char, mul, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_fp ATOMIC_CMPXCHG_MIX( fixed1u, uchar, mul, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_mul_fp ATOMIC_CMPXCHG_MIX( fixed1, char, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_fp ATOMIC_CMPXCHG_MIX( fixed1u, uchar, div, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_fp ATOMIC_CMPXCHG_MIX( fixed2, short, add, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_fp ATOMIC_CMPXCHG_MIX( fixed2u, 
ushort, add, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_add_fp ATOMIC_CMPXCHG_MIX( fixed2, short, sub, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_fp ATOMIC_CMPXCHG_MIX( fixed2u, ushort, sub, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_sub_fp ATOMIC_CMPXCHG_MIX( fixed2, short, mul, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_fp ATOMIC_CMPXCHG_MIX( fixed2u, ushort, mul, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_mul_fp ATOMIC_CMPXCHG_MIX( fixed2, short, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_fp ATOMIC_CMPXCHG_MIX( fixed2u, ushort, div, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, add, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add_fp ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, add, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_add_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, sub, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_fp ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, sub, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_sub_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, mul, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_fp ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, mul, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_mul_fp ATOMIC_CMPXCHG_MIX( fixed4, kmp_int32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_fp ATOMIC_CMPXCHG_MIX( fixed4u, kmp_uint32, div, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, add, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_fp ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, add, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_add_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, sub, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_fp ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, sub, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_sub_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, mul, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_fp ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, mul, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_mul_fp ATOMIC_CMPXCHG_MIX( fixed8, kmp_int64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_fp ATOMIC_CMPXCHG_MIX( fixed8u, kmp_uint64, div, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, add, 32, +, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, sub, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, mul, 32, *, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_fp ATOMIC_CMPXCHG_MIX( float4, kmp_real32, div, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, add, 64, +, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, sub, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, mul, 64, *, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_fp ATOMIC_CMPXCHG_MIX( float8, kmp_real64, div, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_fp ATOMIC_CRITICAL_FP( float10, long double, add, +, fp, _Quad, 10r, 1 ) // 
__kmpc_atomic_float10_add_fp ATOMIC_CRITICAL_FP( float10, long double, sub, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_fp ATOMIC_CRITICAL_FP( float10, long double, mul, *, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_mul_fp ATOMIC_CRITICAL_FP( float10, long double, div, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_fp #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // Reverse operations ATOMIC_CMPXCHG_REV_MIX( fixed1, char, sub_rev, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed1u, uchar, sub_rev, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed1, char, div_rev, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed1u, uchar, div_rev, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed2, short, sub_rev, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed2u, ushort, sub_rev, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed2, short, div_rev, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed2u, ushort, div_rev, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed4, kmp_int32, sub_rev, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed4u, kmp_uint32, sub_rev, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed4, kmp_int32, div_rev, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed4u, kmp_uint32, div_rev, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed8, kmp_int64, sub_rev, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed8u, kmp_uint64, sub_rev, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed8, kmp_int64, div_rev, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( fixed8u, kmp_uint64, div_rev, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( float4, kmp_real32, sub_rev, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( float4, kmp_real32, div_rev, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_rev_fp ATOMIC_CMPXCHG_REV_MIX( float8, kmp_real64, sub_rev, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_rev_fp ATOMIC_CMPXCHG_REV_MIX( float8, kmp_real64, div_rev, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_rev_fp ATOMIC_CRITICAL_REV_FP( float10, long double, sub_rev, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_rev_fp ATOMIC_CRITICAL_REV_FP( float10, long double, div_rev, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_rev_fp #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ #endif #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // ------------------------------------------------------------------------ // X86 or X86_64: no alignment problems ==================================== #if USE_CMPXCHG_FIX // workaround for C78287 (complex(kind=4) data type) #define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ 
ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG_WORKAROUND(TYPE,BITS,OP) \ } // end of the second part of the workaround for C78287 #else #define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ OP_CMPXCHG(TYPE,BITS,OP) \ } #endif // USE_CMPXCHG_FIX #else // ------------------------------------------------------------------------ // Code for other architectures that don't handle unaligned accesses. #define ATOMIC_CMPXCHG_CMPLX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_MIX(TYPE_ID,TYPE,OP_ID,RTYPE_ID,RTYPE) \ OP_GOMP_CRITICAL(OP##=,GOMP_FLAG) \ if ( ! ( (kmp_uintptr_t) lhs & 0x##MASK) ) { \ OP_CMPXCHG(TYPE,BITS,OP) /* aligned address */ \ } else { \ KMP_CHECK_GTID; \ OP_CRITICAL(OP##=,LCK_ID) /* unaligned address - use critical */ \ } \ } #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, add, 64, +, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_add_cmplx8 ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, sub, 64, -, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_sub_cmplx8 ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, mul, 64, *, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_mul_cmplx8 ATOMIC_CMPXCHG_CMPLX( cmplx4, kmp_cmplx32, div, 64, /, cmplx8, kmp_cmplx64, 8c, 7, KMP_ARCH_X86 ) // __kmpc_atomic_cmplx4_div_cmplx8 // READ, WRITE, CAPTURE are supported only on IA-32 architecture and Intel(R) 64 #if KMP_ARCH_X86 || KMP_ARCH_X86_64 ////////////////////////////////////////////////////////////////////////////////////////////////////// // ------------------------------------------------------------------------ // Atomic READ routines // ------------------------------------------------------------------------ // ------------------------------------------------------------------------ // Beginning of a definition (provides name, parameters, gebug trace) // TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) // OP_ID - operation identifier (add, sub, mul, ...) 
// TYPE - operands' type #define ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE, RET_TYPE) \ RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * loc ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store_ret" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator // Note: temp_val introduced in order to force the compiler to read // *lhs only once (w/o it the compiler reads *lhs twice) // TODO: check if it is still necessary // Return old value regardless of the result of "compare & swap# operation #define OP_CMPXCHG_READ(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ union f_i_union { \ TYPE f_val; \ kmp_int##BITS i_val; \ }; \ union f_i_union old_value; \ temp_val = *loc; \ old_value.f_val = temp_val; \ old_value.i_val = KMP_COMPARE_AND_STORE_RET##BITS( (kmp_int##BITS *) loc, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value.i_val ); \ new_value = old_value.f_val; \ return new_value; \ } // ------------------------------------------------------------------------- // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL_READ(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ new_value = (*loc); \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------- #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_READ(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_READ( OP, 0 ); \ return new_value; \ } #else #define OP_GOMP_CRITICAL_READ(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------- #define ATOMIC_FIXED_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) \ new_value = KMP_TEST_THEN_ADD##BITS( loc, OP 0 ); \ return new_value; \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_READ(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) \ OP_CMPXCHG_READ(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL_READ(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_READ(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_READ(OP##=,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL_READ(OP,LCK_ID) /* send assignment */ \ return new_value; \ } // ------------------------------------------------------------------------ // Fix for cmplx4 read (CQ220361) on Windows* OS. Regular routine with return value doesn't work. // Let's return the read value through the additional parameter. 
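// ------------------------------------------------------------------------
// Illustrative sketch (not part of the library, kept under "#if 0"):
// OP_CMPXCHG_READ above reads a floating-point location atomically by issuing
// a compare-and-store whose "new" value equals the expected old value;
// whether or not the store happens, the value returned by the primitive is
// the bit pattern that was atomically observed.  The fragment below shows the
// same union trick for a 32-bit float using the GCC/Clang
// __sync_val_compare_and_swap builtin; the function name and the builtin are
// assumptions made for this sketch only (the library goes through
// KMP_COMPARE_AND_STORE_RET32, and the pointer cast mirrors VOLATILE_CAST).
#if 0
#include <stdint.h>

static float sketch_float4_rd( float * loc )
{
    union f_i_union { float f_val; int32_t i_val; } old_value;
    old_value.f_val = *loc;        /* speculative read, may already be stale */
    /* CAS the location with itself: the builtin returns the 32-bit pattern
       that was actually present, read atomically. */
    old_value.i_val = __sync_val_compare_and_swap( (int32_t *) loc,
                                                   old_value.i_val,
                                                   old_value.i_val );
    return old_value.f_val;
}
#endif
// ------------------------------------------------------------------------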
#if ( KMP_OS_WINDOWS ) #define OP_CRITICAL_READ_WRK(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ (*out) = (*loc); \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_READ_WRK( OP, 0 ); \ } #else #define OP_GOMP_CRITICAL_READ_WRK(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ #define ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \ void __kmpc_atomic_##TYPE_ID##_##OP_ID( TYPE * out, ident_t *id_ref, int gtid, TYPE * loc ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------ #define ATOMIC_CRITICAL_READ_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_READ_WRK(TYPE_ID,OP_ID,TYPE) \ OP_GOMP_CRITICAL_READ_WRK(OP##=,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL_READ_WRK(OP,LCK_ID) /* send assignment */ \ } #endif // KMP_OS_WINDOWS // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG ATOMIC_FIXED_READ( fixed4, rd, kmp_int32, 32, +, 0 ) // __kmpc_atomic_fixed4_rd ATOMIC_FIXED_READ( fixed8, rd, kmp_int64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_rd ATOMIC_CMPXCHG_READ( float4, rd, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_rd ATOMIC_CMPXCHG_READ( float8, rd, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_rd // !!! TODO: Remove lock operations for "char" since it can't be non-atomic ATOMIC_CMPXCHG_READ( fixed1, rd, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_rd ATOMIC_CMPXCHG_READ( fixed2, rd, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_rd ATOMIC_CRITICAL_READ( float10, rd, long double, +, 10r, 1 ) // __kmpc_atomic_float10_rd #if KMP_HAVE_QUAD ATOMIC_CRITICAL_READ( float16, rd, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_rd #endif // KMP_HAVE_QUAD // Fix for CQ220361 on Windows* OS #if ( KMP_OS_WINDOWS ) ATOMIC_CRITICAL_READ_WRK( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd #else ATOMIC_CRITICAL_READ( cmplx4, rd, kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_rd #endif ATOMIC_CRITICAL_READ( cmplx8, rd, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_rd ATOMIC_CRITICAL_READ( cmplx10, rd, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_rd #if KMP_HAVE_QUAD ATOMIC_CRITICAL_READ( cmplx16, rd, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_rd #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_READ( float16, a16_rd, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_a16_rd ATOMIC_CRITICAL_READ( cmplx16, a16_rd, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_rd #endif #endif // ------------------------------------------------------------------------ // Atomic WRITE routines // ------------------------------------------------------------------------ #define ATOMIC_XCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP,GOMP_FLAG) \ KMP_XCHG_FIXED##BITS( lhs, rhs ); \ } // ------------------------------------------------------------------------ #define ATOMIC_XCHG_FLOAT_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP,GOMP_FLAG) \ KMP_XCHG_REAL##BITS( lhs, rhs ); \ } // 
------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator // Note: temp_val introduced in order to force the compiler to read // *lhs only once (w/o it the compiler reads *lhs twice) #define OP_CMPXCHG_WR(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value, new_value; \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs; \ while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_CPU_PAUSE(); \ \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs; \ } \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_WR(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP,GOMP_FLAG) \ OP_CMPXCHG_WR(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL_WR(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN(TYPE_ID,OP_ID,TYPE,void) \ OP_GOMP_CRITICAL(OP,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL(OP,LCK_ID) /* send assignment */ \ } // ------------------------------------------------------------------------- ATOMIC_XCHG_WR( fixed1, wr, kmp_int8, 8, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_wr ATOMIC_XCHG_WR( fixed2, wr, kmp_int16, 16, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_wr ATOMIC_XCHG_WR( fixed4, wr, kmp_int32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_wr #if ( KMP_ARCH_X86 ) ATOMIC_CMPXCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr #else ATOMIC_XCHG_WR( fixed8, wr, kmp_int64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_wr #endif ATOMIC_XCHG_FLOAT_WR( float4, wr, kmp_real32, 32, =, KMP_ARCH_X86 ) // __kmpc_atomic_float4_wr #if ( KMP_ARCH_X86 ) ATOMIC_CMPXCHG_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr #else ATOMIC_XCHG_FLOAT_WR( float8, wr, kmp_real64, 64, =, KMP_ARCH_X86 ) // __kmpc_atomic_float8_wr #endif ATOMIC_CRITICAL_WR( float10, wr, long double, =, 10r, 1 ) // __kmpc_atomic_float10_wr #if KMP_HAVE_QUAD ATOMIC_CRITICAL_WR( float16, wr, QUAD_LEGACY, =, 16r, 1 ) // __kmpc_atomic_float16_wr #endif ATOMIC_CRITICAL_WR( cmplx4, wr, kmp_cmplx32, =, 8c, 1 ) // __kmpc_atomic_cmplx4_wr ATOMIC_CRITICAL_WR( cmplx8, wr, kmp_cmplx64, =, 16c, 1 ) // __kmpc_atomic_cmplx8_wr ATOMIC_CRITICAL_WR( cmplx10, wr, kmp_cmplx80, =, 20c, 1 ) // __kmpc_atomic_cmplx10_wr #if KMP_HAVE_QUAD ATOMIC_CRITICAL_WR( cmplx16, wr, CPLX128_LEG, =, 32c, 1 ) // __kmpc_atomic_cmplx16_wr #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_WR( float16, a16_wr, Quad_a16_t, =, 16r, 1 ) // __kmpc_atomic_float16_a16_wr ATOMIC_CRITICAL_WR( cmplx16, a16_wr, kmp_cmplx128_a16_t, =, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_wr #endif #endif // ------------------------------------------------------------------------ // Atomic CAPTURE routines // ------------------------------------------------------------------------ // Beginning of a definition (provides name, parameters, gebug trace) // TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) // OP_ID - operation 
identifier (add, sub, mul, ...) // TYPE - operands' type #define ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,RET_TYPE) \ RET_TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, int flag ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------- // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL_CPT(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ (*lhs) OP rhs; \ new_value = (*lhs); \ } else { \ new_value = (*lhs); \ (*lhs) OP rhs; \ } \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return new_value; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_CPT(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_CPT( OP##=, 0 ); \ } #else #define OP_GOMP_CRITICAL_CPT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator // Note: temp_val introduced in order to force the compiler to read // *lhs only once (w/o it the compiler reads *lhs twice) #define OP_CMPXCHG_CPT(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value, new_value; \ temp_val = *lhs; \ old_value = temp_val; \ new_value = old_value OP rhs; \ while ( ! 
KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_CPU_PAUSE(); \ \ temp_val = *lhs; \ old_value = temp_val; \ new_value = old_value OP rhs; \ } \ if( flag ) { \ return new_value; \ } else \ return old_value; \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \ OP_CMPXCHG_CPT(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------- #define ATOMIC_FIXED_ADD_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE old_value, new_value; \ OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \ /* OP used as a sign for subtraction: (lhs-rhs) --> (lhs+-rhs) */ \ old_value = KMP_TEST_THEN_ADD##BITS( lhs, OP rhs ); \ if( flag ) { \ return old_value OP rhs; \ } else \ return old_value; \ } // ------------------------------------------------------------------------- ATOMIC_FIXED_ADD_CPT( fixed4, add_cpt, kmp_int32, 32, +, 0 ) // __kmpc_atomic_fixed4_add_cpt ATOMIC_FIXED_ADD_CPT( fixed4, sub_cpt, kmp_int32, 32, -, 0 ) // __kmpc_atomic_fixed4_sub_cpt ATOMIC_FIXED_ADD_CPT( fixed8, add_cpt, kmp_int64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_cpt ATOMIC_FIXED_ADD_CPT( fixed8, sub_cpt, kmp_int64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt ATOMIC_CMPXCHG_CPT( float4, add_cpt, kmp_real32, 32, +, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_cpt ATOMIC_CMPXCHG_CPT( float4, sub_cpt, kmp_real32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt ATOMIC_CMPXCHG_CPT( float8, add_cpt, kmp_real64, 64, +, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_cpt ATOMIC_CMPXCHG_CPT( float8, sub_cpt, kmp_real64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt // ------------------------------------------------------------------------ // Entries definition for integer operands // TYPE_ID - operands type and size (fixed4, float4) // OP_ID - operation identifier (add, sub, mul, ...) 
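// ------------------------------------------------------------------------
// Illustrative sketch (not part of the library, kept under "#if 0"):
// ATOMIC_FIXED_ADD_CPT above implements the capture with a single atomic
// fetch-and-add: KMP_TEST_THEN_ADD##BITS hands back the old value, and the
// "flag" argument selects whether the caller receives the value before or
// after the update.  The fragment below shows the same idea with the
// GCC/Clang __atomic_fetch_add builtin; the function name and the builtin
// are assumptions made for this sketch only.
#if 0
#include <stdint.h>

static int32_t sketch_fixed4_add_cpt( int32_t * lhs, int32_t rhs, int flag )
{
    /* Atomically performs *lhs += rhs and returns the previous value. */
    int32_t old_value = __atomic_fetch_add( lhs, rhs, __ATOMIC_ACQ_REL );
    /* flag != 0: capture the updated value; flag == 0: capture the old one. */
    return flag ? old_value + rhs : old_value;
}
#endif
// ------------------------------------------------------------------------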
// TYPE - operand type // BITS - size in bits, used to distinguish low level calls // OP - operator (used in critical section) // TYPE_ID,OP_ID, TYPE, BITS,OP,GOMP_FLAG // ------------------------------------------------------------------------ // Routines for ATOMIC integer operands, other operators // ------------------------------------------------------------------------ // TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG ATOMIC_CMPXCHG_CPT( fixed1, add_cpt, kmp_int8, 8, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_cpt ATOMIC_CMPXCHG_CPT( fixed1, andb_cpt, kmp_int8, 8, &, 0 ) // __kmpc_atomic_fixed1_andb_cpt ATOMIC_CMPXCHG_CPT( fixed1, div_cpt, kmp_int8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt ATOMIC_CMPXCHG_CPT( fixed1u, div_cpt, kmp_uint8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt ATOMIC_CMPXCHG_CPT( fixed1, mul_cpt, kmp_int8, 8, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_cpt ATOMIC_CMPXCHG_CPT( fixed1, orb_cpt, kmp_int8, 8, |, 0 ) // __kmpc_atomic_fixed1_orb_cpt ATOMIC_CMPXCHG_CPT( fixed1, shl_cpt, kmp_int8, 8, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt ATOMIC_CMPXCHG_CPT( fixed1, shr_cpt, kmp_int8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt ATOMIC_CMPXCHG_CPT( fixed1u, shr_cpt, kmp_uint8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed1, sub_cpt, kmp_int8, 8, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt ATOMIC_CMPXCHG_CPT( fixed1, xor_cpt, kmp_int8, 8, ^, 0 ) // __kmpc_atomic_fixed1_xor_cpt ATOMIC_CMPXCHG_CPT( fixed2, add_cpt, kmp_int16, 16, +, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_cpt ATOMIC_CMPXCHG_CPT( fixed2, andb_cpt, kmp_int16, 16, &, 0 ) // __kmpc_atomic_fixed2_andb_cpt ATOMIC_CMPXCHG_CPT( fixed2, div_cpt, kmp_int16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt ATOMIC_CMPXCHG_CPT( fixed2u, div_cpt, kmp_uint16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt ATOMIC_CMPXCHG_CPT( fixed2, mul_cpt, kmp_int16, 16, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_cpt ATOMIC_CMPXCHG_CPT( fixed2, orb_cpt, kmp_int16, 16, |, 0 ) // __kmpc_atomic_fixed2_orb_cpt ATOMIC_CMPXCHG_CPT( fixed2, shl_cpt, kmp_int16, 16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt ATOMIC_CMPXCHG_CPT( fixed2, shr_cpt, kmp_int16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt ATOMIC_CMPXCHG_CPT( fixed2u, shr_cpt, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed2, sub_cpt, kmp_int16, 16, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt ATOMIC_CMPXCHG_CPT( fixed2, xor_cpt, kmp_int16, 16, ^, 0 ) // __kmpc_atomic_fixed2_xor_cpt ATOMIC_CMPXCHG_CPT( fixed4, andb_cpt, kmp_int32, 32, &, 0 ) // __kmpc_atomic_fixed4_andb_cpt ATOMIC_CMPXCHG_CPT( fixed4, div_cpt, kmp_int32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt ATOMIC_CMPXCHG_CPT( fixed4u, div_cpt, kmp_uint32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt ATOMIC_CMPXCHG_CPT( fixed4, mul_cpt, kmp_int32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_mul_cpt ATOMIC_CMPXCHG_CPT( fixed4, orb_cpt, kmp_int32, 32, |, 0 ) // __kmpc_atomic_fixed4_orb_cpt ATOMIC_CMPXCHG_CPT( fixed4, shl_cpt, kmp_int32, 32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt ATOMIC_CMPXCHG_CPT( fixed4, shr_cpt, kmp_int32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt ATOMIC_CMPXCHG_CPT( fixed4u, shr_cpt, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed4, xor_cpt, kmp_int32, 32, ^, 0 ) // __kmpc_atomic_fixed4_xor_cpt ATOMIC_CMPXCHG_CPT( fixed8, andb_cpt, kmp_int64, 64, &, 
KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andb_cpt ATOMIC_CMPXCHG_CPT( fixed8, div_cpt, kmp_int64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt ATOMIC_CMPXCHG_CPT( fixed8u, div_cpt, kmp_uint64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt ATOMIC_CMPXCHG_CPT( fixed8, mul_cpt, kmp_int64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_cpt ATOMIC_CMPXCHG_CPT( fixed8, orb_cpt, kmp_int64, 64, |, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orb_cpt ATOMIC_CMPXCHG_CPT( fixed8, shl_cpt, kmp_int64, 64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt ATOMIC_CMPXCHG_CPT( fixed8, shr_cpt, kmp_int64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt ATOMIC_CMPXCHG_CPT( fixed8u, shr_cpt, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt ATOMIC_CMPXCHG_CPT( fixed8, xor_cpt, kmp_int64, 64, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_xor_cpt ATOMIC_CMPXCHG_CPT( float4, div_cpt, kmp_real32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt ATOMIC_CMPXCHG_CPT( float4, mul_cpt, kmp_real32, 32, *, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_cpt ATOMIC_CMPXCHG_CPT( float8, div_cpt, kmp_real64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt ATOMIC_CMPXCHG_CPT( float8, mul_cpt, kmp_real64, 64, *, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_cpt // TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG ////////////////////////////////// // CAPTURE routines for mixed types RHS=float16 #if KMP_HAVE_QUAD // Beginning of a definition (provides name, parameters, gebug trace) // TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) // OP_ID - operation identifier (add, sub, mul, ...) // TYPE - operands' type #define ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \ TYPE __kmpc_atomic_##TYPE_ID##_##OP_ID##_##RTYPE_ID( ident_t *id_ref, int gtid, TYPE * lhs, RTYPE rhs, int flag ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID "_" #RTYPE_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_CPT_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) \ OP_CMPXCHG_CPT(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------- #define ATOMIC_CRITICAL_CPT_MIX(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL_CPT(OP##=,LCK_ID) /* send assignment */ \ } ATOMIC_CMPXCHG_CPT_MIX( fixed1, char, add_cpt, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_add_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed1u, uchar, add_cpt, 8, +, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_add_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed1, char, sub_cpt, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed1u, uchar, sub_cpt, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_sub_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed1, char, mul_cpt, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_mul_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed1u, uchar, mul_cpt, 8, *, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_mul_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed1, char, div_cpt, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed1u, uchar, div_cpt, 8, /, 
fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed2, short, add_cpt, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_add_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed2u, ushort, add_cpt, 16, +, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_add_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed2, short, sub_cpt, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed2u, ushort, sub_cpt, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_sub_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed2, short, mul_cpt, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_mul_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed2u, ushort, mul_cpt, 16, *, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_mul_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed2, short, div_cpt, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed2u, ushort, div_cpt, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed4, kmp_int32, add_cpt, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_add_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed4u, kmp_uint32, add_cpt, 32, +, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_add_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed4, kmp_int32, sub_cpt, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed4u, kmp_uint32, sub_cpt, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_sub_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed4, kmp_int32, mul_cpt, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_mul_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed4u, kmp_uint32, mul_cpt, 32, *, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_mul_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed4, kmp_int32, div_cpt, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed4u, kmp_uint32, div_cpt, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed8, kmp_int64, add_cpt, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_add_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed8u, kmp_uint64, add_cpt, 64, +, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_add_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed8, kmp_int64, sub_cpt, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed8u, kmp_uint64, sub_cpt, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_sub_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed8, kmp_int64, mul_cpt, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_mul_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed8u, kmp_uint64, mul_cpt, 64, *, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_mul_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed8, kmp_int64, div_cpt, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( fixed8u, kmp_uint64, div_cpt, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( float4, kmp_real32, add_cpt, 32, +, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_add_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( float4, kmp_real32, sub_cpt, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( float4, kmp_real32, mul_cpt, 32, *, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_mul_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( float4, kmp_real32, div_cpt, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt_fp 
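// ------------------------------------------------------------------------
// Illustrative sketch (not part of the library, kept under "#if 0"):
// The *_cpt_fp entries above combine two ideas documented earlier in this
// file: the RHS is promoted to the widest floating type before the operation
// ( *lhs = (TYPE)( (_Quad)(*lhs) OP rhs ) ) and the compare-and-store retry
// loop of OP_CMPXCHG_CPT publishes the narrowed result.  The fragment below
// shows that shape for a 4-byte signed integer LHS, using "long double" as a
// stand-in for _Quad and the GCC/Clang __atomic builtins; the function name,
// the stand-in type, and the builtins are assumptions made for this sketch
// only.
#if 0
#include <stdint.h>
#include <stdbool.h>

static int32_t sketch_fixed4_add_cpt_fp( int32_t * lhs, long double rhs, int flag )
{
    int32_t old_value = *lhs;
    /* Promote, operate, then narrow back to the LHS type before storing. */
    int32_t new_value = (int32_t)( (long double) old_value + rhs );
    while ( ! __atomic_compare_exchange_n( lhs, &old_value, new_value,
                                           /* weak */ false,
                                           __ATOMIC_ACQUIRE, __ATOMIC_RELAXED ) ) {
        new_value = (int32_t)( (long double) old_value + rhs );
    }
    return flag ? new_value : old_value;   /* capture after or before the update */
}
#endif
// ------------------------------------------------------------------------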
ATOMIC_CMPXCHG_CPT_MIX( float8, kmp_real64, add_cpt, 64, +, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_add_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( float8, kmp_real64, sub_cpt, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( float8, kmp_real64, mul_cpt, 64, *, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_mul_cpt_fp ATOMIC_CMPXCHG_CPT_MIX( float8, kmp_real64, div_cpt, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt_fp ATOMIC_CRITICAL_CPT_MIX( float10, long double, add_cpt, +, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_add_cpt_fp ATOMIC_CRITICAL_CPT_MIX( float10, long double, sub_cpt, -, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt_fp ATOMIC_CRITICAL_CPT_MIX( float10, long double, mul_cpt, *, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_mul_cpt_fp ATOMIC_CRITICAL_CPT_MIX( float10, long double, div_cpt, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_cpt_fp #endif //KMP_HAVE_QUAD /////////////////////////////////// // ------------------------------------------------------------------------ // Routines for C/C++ Reduction operators && and || // ------------------------------------------------------------------------ // ------------------------------------------------------------------------- // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL_L_CPT(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ new_value OP rhs; \ } else \ new_value = (*lhs); \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_L_CPT(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_L_CPT( OP, 0 ); \ return new_value; \ } #else #define OP_GOMP_CRITICAL_L_CPT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ // Need separate macros for &&, || because there is no combined assignment #define ATOMIC_CMPX_L_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_L_CPT( = *lhs OP, GOMP_FLAG ) \ OP_CMPXCHG_CPT(TYPE,BITS,OP) \ } ATOMIC_CMPX_L_CPT( fixed1, andl_cpt, char, 8, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_andl_cpt ATOMIC_CMPX_L_CPT( fixed1, orl_cpt, char, 8, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_orl_cpt ATOMIC_CMPX_L_CPT( fixed2, andl_cpt, short, 16, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_andl_cpt ATOMIC_CMPX_L_CPT( fixed2, orl_cpt, short, 16, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_orl_cpt ATOMIC_CMPX_L_CPT( fixed4, andl_cpt, kmp_int32, 32, &&, 0 ) // __kmpc_atomic_fixed4_andl_cpt ATOMIC_CMPX_L_CPT( fixed4, orl_cpt, kmp_int32, 32, ||, 0 ) // __kmpc_atomic_fixed4_orl_cpt ATOMIC_CMPX_L_CPT( fixed8, andl_cpt, kmp_int64, 64, &&, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_andl_cpt ATOMIC_CMPX_L_CPT( fixed8, orl_cpt, kmp_int64, 64, ||, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_orl_cpt // ------------------------------------------------------------------------- // Routines for Fortran operators that matched no one in C: // MAX, MIN, .EQV., .NEQV. // Operators .AND., .OR. 
are covered by __kmpc_atomic_*_{andl,orl}_cpt // Intrinsics IAND, IOR, IEOR are covered by __kmpc_atomic_*_{andb,orb,xor}_cpt // ------------------------------------------------------------------------- // ------------------------------------------------------------------------- // MIN and MAX need separate macros // OP - operator to check if we need any actions? #define MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if ( *lhs OP rhs ) { /* still need actions? */ \ old_value = *lhs; \ *lhs = rhs; \ if ( flag ) \ new_value = rhs; \ else \ new_value = old_value; \ } \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return new_value; \ // ------------------------------------------------------------------------- #ifdef KMP_GOMP_COMPAT #define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG) \ if (( FLAG ) && ( __kmp_atomic_mode == 2 )) { \ KMP_CHECK_GTID; \ MIN_MAX_CRITSECT_CPT( OP, 0 ); \ } #else #define GOMP_MIN_MAX_CRITSECT_CPT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------- #define MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ /*TYPE old_value; */ \ temp_val = *lhs; \ old_value = temp_val; \ while ( old_value OP rhs && /* still need actions? */ \ ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &rhs ) ) \ { \ KMP_CPU_PAUSE(); \ temp_val = *lhs; \ old_value = temp_val; \ } \ if( flag ) \ return rhs; \ else \ return old_value; \ } // ------------------------------------------------------------------------- // 1-byte, 2-byte operands - use critical section #define MIN_MAX_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value, old_value; \ if ( *lhs OP rhs ) { /* need actions? 
*/ \ GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \ MIN_MAX_CRITSECT_CPT(OP,LCK_ID) \ } \ return *lhs; \ } #define MIN_MAX_COMPXCHG_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value, old_value; \ if ( *lhs OP rhs ) { \ GOMP_MIN_MAX_CRITSECT_CPT(OP,GOMP_FLAG) \ MIN_MAX_CMPXCHG_CPT(TYPE,BITS,OP) \ } \ return *lhs; \ } MIN_MAX_COMPXCHG_CPT( fixed1, max_cpt, char, 8, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_max_cpt MIN_MAX_COMPXCHG_CPT( fixed1, min_cpt, char, 8, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_min_cpt MIN_MAX_COMPXCHG_CPT( fixed2, max_cpt, short, 16, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_max_cpt MIN_MAX_COMPXCHG_CPT( fixed2, min_cpt, short, 16, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_min_cpt MIN_MAX_COMPXCHG_CPT( fixed4, max_cpt, kmp_int32, 32, <, 0 ) // __kmpc_atomic_fixed4_max_cpt MIN_MAX_COMPXCHG_CPT( fixed4, min_cpt, kmp_int32, 32, >, 0 ) // __kmpc_atomic_fixed4_min_cpt MIN_MAX_COMPXCHG_CPT( fixed8, max_cpt, kmp_int64, 64, <, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_max_cpt MIN_MAX_COMPXCHG_CPT( fixed8, min_cpt, kmp_int64, 64, >, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_min_cpt MIN_MAX_COMPXCHG_CPT( float4, max_cpt, kmp_real32, 32, <, KMP_ARCH_X86 ) // __kmpc_atomic_float4_max_cpt MIN_MAX_COMPXCHG_CPT( float4, min_cpt, kmp_real32, 32, >, KMP_ARCH_X86 ) // __kmpc_atomic_float4_min_cpt MIN_MAX_COMPXCHG_CPT( float8, max_cpt, kmp_real64, 64, <, KMP_ARCH_X86 ) // __kmpc_atomic_float8_max_cpt MIN_MAX_COMPXCHG_CPT( float8, min_cpt, kmp_real64, 64, >, KMP_ARCH_X86 ) // __kmpc_atomic_float8_min_cpt #if KMP_HAVE_QUAD MIN_MAX_CRITICAL_CPT( float16, max_cpt, QUAD_LEGACY, <, 16r, 1 ) // __kmpc_atomic_float16_max_cpt MIN_MAX_CRITICAL_CPT( float16, min_cpt, QUAD_LEGACY, >, 16r, 1 ) // __kmpc_atomic_float16_min_cpt #if ( KMP_ARCH_X86 ) MIN_MAX_CRITICAL_CPT( float16, max_a16_cpt, Quad_a16_t, <, 16r, 1 ) // __kmpc_atomic_float16_max_a16_cpt MIN_MAX_CRITICAL_CPT( float16, min_a16_cpt, Quad_a16_t, >, 16r, 1 ) // __kmpc_atomic_float16_mix_a16_cpt #endif #endif // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_CPT( OP, 0 ); \ } #else #define OP_GOMP_CRITICAL_EQV_CPT(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ #define ATOMIC_CMPX_EQV_CPT(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_EQV_CPT(^=~,GOMP_FLAG) /* send assignment */ \ OP_CMPXCHG_CPT(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------ ATOMIC_CMPXCHG_CPT( fixed1, neqv_cpt, kmp_int8, 8, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_neqv_cpt ATOMIC_CMPXCHG_CPT( fixed2, neqv_cpt, kmp_int16, 16, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_neqv_cpt ATOMIC_CMPXCHG_CPT( fixed4, neqv_cpt, kmp_int32, 32, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_neqv_cpt ATOMIC_CMPXCHG_CPT( fixed8, neqv_cpt, kmp_int64, 64, ^, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_neqv_cpt ATOMIC_CMPX_EQV_CPT( fixed1, eqv_cpt, kmp_int8, 8, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_eqv_cpt ATOMIC_CMPX_EQV_CPT( fixed2, eqv_cpt, kmp_int16, 16, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_eqv_cpt ATOMIC_CMPX_EQV_CPT( fixed4, eqv_cpt, kmp_int32, 32, ^~, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_eqv_cpt ATOMIC_CMPX_EQV_CPT( fixed8, eqv_cpt, kmp_int64, 64, ^~, KMP_ARCH_X86 ) // 
__kmpc_atomic_fixed8_eqv_cpt // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL_CPT(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_CPT(OP,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL_CPT(OP##=,LCK_ID) /* send assignment */ \ } // ------------------------------------------------------------------------ // Workaround for cmplx4. Regular routines with return value don't work // on Win_32e. Let's return captured values through the additional parameter. #define OP_CRITICAL_CPT_WRK(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ (*lhs) OP rhs; \ (*out) = (*lhs); \ } else { \ (*out) = (*lhs); \ (*lhs) OP rhs; \ } \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_CPT_WRK( OP##=, 0 ); \ } #else #define OP_GOMP_CRITICAL_CPT_WRK(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ #define ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \ void __kmpc_atomic_##TYPE_ID##_##OP_ID( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out, int flag ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_" #OP_ID ": T#%d\n", gtid )); // ------------------------------------------------------------------------ #define ATOMIC_CRITICAL_CPT_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \ OP_GOMP_CRITICAL_CPT_WRK(OP,GOMP_FLAG) \ OP_CRITICAL_CPT_WRK(OP##=,LCK_ID) \ } // The end of workaround for cmplx4 /* ------------------------------------------------------------------------- */ // routines for long double type ATOMIC_CRITICAL_CPT( float10, add_cpt, long double, +, 10r, 1 ) // __kmpc_atomic_float10_add_cpt ATOMIC_CRITICAL_CPT( float10, sub_cpt, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt ATOMIC_CRITICAL_CPT( float10, mul_cpt, long double, *, 10r, 1 ) // __kmpc_atomic_float10_mul_cpt ATOMIC_CRITICAL_CPT( float10, div_cpt, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt #if KMP_HAVE_QUAD // routines for _Quad type ATOMIC_CRITICAL_CPT( float16, add_cpt, QUAD_LEGACY, +, 16r, 1 ) // __kmpc_atomic_float16_add_cpt ATOMIC_CRITICAL_CPT( float16, sub_cpt, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt ATOMIC_CRITICAL_CPT( float16, mul_cpt, QUAD_LEGACY, *, 16r, 1 ) // __kmpc_atomic_float16_mul_cpt ATOMIC_CRITICAL_CPT( float16, div_cpt, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_CPT( float16, add_a16_cpt, Quad_a16_t, +, 16r, 1 ) // __kmpc_atomic_float16_add_a16_cpt ATOMIC_CRITICAL_CPT( float16, sub_a16_cpt, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt ATOMIC_CRITICAL_CPT( float16, mul_a16_cpt, Quad_a16_t, *, 16r, 1 ) // __kmpc_atomic_float16_mul_a16_cpt ATOMIC_CRITICAL_CPT( float16, div_a16_cpt, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt #endif #endif // routines for complex types // cmplx4 routines to return void ATOMIC_CRITICAL_CPT_WRK( cmplx4, add_cpt, 
kmp_cmplx32, +, 8c, 1 ) // __kmpc_atomic_cmplx4_add_cpt ATOMIC_CRITICAL_CPT_WRK( cmplx4, sub_cpt, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt ATOMIC_CRITICAL_CPT_WRK( cmplx4, mul_cpt, kmp_cmplx32, *, 8c, 1 ) // __kmpc_atomic_cmplx4_mul_cpt ATOMIC_CRITICAL_CPT_WRK( cmplx4, div_cpt, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt ATOMIC_CRITICAL_CPT( cmplx8, add_cpt, kmp_cmplx64, +, 16c, 1 ) // __kmpc_atomic_cmplx8_add_cpt ATOMIC_CRITICAL_CPT( cmplx8, sub_cpt, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt ATOMIC_CRITICAL_CPT( cmplx8, mul_cpt, kmp_cmplx64, *, 16c, 1 ) // __kmpc_atomic_cmplx8_mul_cpt ATOMIC_CRITICAL_CPT( cmplx8, div_cpt, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt ATOMIC_CRITICAL_CPT( cmplx10, add_cpt, kmp_cmplx80, +, 20c, 1 ) // __kmpc_atomic_cmplx10_add_cpt ATOMIC_CRITICAL_CPT( cmplx10, sub_cpt, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_cpt ATOMIC_CRITICAL_CPT( cmplx10, mul_cpt, kmp_cmplx80, *, 20c, 1 ) // __kmpc_atomic_cmplx10_mul_cpt ATOMIC_CRITICAL_CPT( cmplx10, div_cpt, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt #if KMP_HAVE_QUAD ATOMIC_CRITICAL_CPT( cmplx16, add_cpt, CPLX128_LEG, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_cpt ATOMIC_CRITICAL_CPT( cmplx16, sub_cpt, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt ATOMIC_CRITICAL_CPT( cmplx16, mul_cpt, CPLX128_LEG, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_cpt ATOMIC_CRITICAL_CPT( cmplx16, div_cpt, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_CPT( cmplx16, add_a16_cpt, kmp_cmplx128_a16_t, +, 32c, 1 ) // __kmpc_atomic_cmplx16_add_a16_cpt ATOMIC_CRITICAL_CPT( cmplx16, sub_a16_cpt, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt ATOMIC_CRITICAL_CPT( cmplx16, mul_a16_cpt, kmp_cmplx128_a16_t, *, 32c, 1 ) // __kmpc_atomic_cmplx16_mul_a16_cpt ATOMIC_CRITICAL_CPT( cmplx16, div_a16_cpt, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt #endif #endif #if OMP_40_ENABLED // OpenMP 4.0: v = x = expr binop x; { v = x; x = expr binop x; } { x = expr binop x; v = x; } for non-commutative operations. 
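// ------------------------------------------------------------------------
// Illustrative sketch (not part of the library, kept under "#if 0"):
// For the capture-reverse forms described above, "flag" selects which of the
// two orderings the caller asked for: capture after the update
// ( { x = expr binop x; v = x; } ) or before it ( { v = x; x = expr binop x; } ).
// The fragment below mirrors the lock-protected shape of OP_CRITICAL_CPT_REV
// (defined further down) using a plain pthread mutex as a stand-in for
// __kmp_acquire_atomic_lock/__kmp_release_atomic_lock; the mutex, the function
// name, and the operand type are assumptions made for this sketch only.
#if 0
#include <pthread.h>

static pthread_mutex_t sketch_lock = PTHREAD_MUTEX_INITIALIZER;

static long double sketch_float10_sub_cpt_rev( long double * lhs, long double rhs, int flag )
{
    long double new_value;
    pthread_mutex_lock( &sketch_lock );
    if ( flag ) {
        *lhs = rhs - (*lhs);        /* x = expr binop x */
        new_value = *lhs;           /* v = x (capture the updated value) */
    } else {
        new_value = *lhs;           /* v = x (capture the old value) */
        *lhs = rhs - (*lhs);        /* x = expr binop x */
    }
    pthread_mutex_unlock( &sketch_lock );
    return new_value;
}
#endif
// ------------------------------------------------------------------------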
// Supported only on IA-32 architecture and Intel(R) 64 // ------------------------------------------------------------------------- // Operation on *lhs, rhs bound by critical section // OP - operator (it's supposed to contain an assignment) // LCK_ID - lock identifier // Note: don't check gtid as it should always be valid // 1, 2-byte - expect valid parameter, other - check before this macro #define OP_CRITICAL_CPT_REV(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ /*temp_val = (*lhs);*/\ (*lhs) = (rhs) OP (*lhs); \ new_value = (*lhs); \ } else { \ new_value = (*lhs);\ (*lhs) = (rhs) OP (*lhs); \ } \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return new_value; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_CPT_REV( OP, 0 ); \ } #else #define OP_GOMP_CRITICAL_CPT_REV(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ // Operation on *lhs, rhs using "compare_and_store" routine // TYPE - operands' type // BITS - size in bits, used to distinguish low level calls // OP - operator // Note: temp_val introduced in order to force the compiler to read // *lhs only once (w/o it the compiler reads *lhs twice) #define OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value, new_value; \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs OP old_value; \ while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_CPU_PAUSE(); \ \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs OP old_value; \ } \ if( flag ) { \ return new_value; \ } else \ return old_value; \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_CPT_REV(TYPE_ID,OP_ID,TYPE,BITS,OP,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \ OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \ } ATOMIC_CMPXCHG_CPT_REV( fixed1, div_cpt_rev, kmp_int8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed1u, div_cpt_rev, kmp_uint8, 8, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed1, shl_cpt_rev, kmp_int8, 8, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shl_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed1, shr_cpt_rev, kmp_int8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed1u, shr_cpt_rev, kmp_uint8, 8, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed1, sub_cpt_rev, kmp_int8, 8, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed2, div_cpt_rev, kmp_int16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed2u, div_cpt_rev, kmp_uint16, 16, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed2, shl_cpt_rev, kmp_int16, 16, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shl_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed2, shr_cpt_rev, kmp_int16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed2u, shr_cpt_rev, kmp_uint16, 16, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed2, 
sub_cpt_rev, kmp_int16, 16, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4, div_cpt_rev, kmp_int32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4u, div_cpt_rev, kmp_uint32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4, shl_cpt_rev, kmp_int32, 32, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shl_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4, shr_cpt_rev, kmp_int32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4u, shr_cpt_rev, kmp_uint32, 32, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4u_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed4, sub_cpt_rev, kmp_int32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_sub_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8, div_cpt_rev, kmp_int64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8u, div_cpt_rev, kmp_uint64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8, shl_cpt_rev, kmp_int64, 64, <<, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shl_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8, shr_cpt_rev, kmp_int64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8u, shr_cpt_rev, kmp_uint64, 64, >>, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_shr_cpt_rev ATOMIC_CMPXCHG_CPT_REV( fixed8, sub_cpt_rev, kmp_int64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt_rev ATOMIC_CMPXCHG_CPT_REV( float4, div_cpt_rev, kmp_real32, 32, /, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( float4, sub_cpt_rev, kmp_real32, 32, -, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt_rev ATOMIC_CMPXCHG_CPT_REV( float8, div_cpt_rev, kmp_real64, 64, /, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt_rev ATOMIC_CMPXCHG_CPT_REV( float8, sub_cpt_rev, kmp_real64, 64, -, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt_rev // TYPE_ID,OP_ID, TYPE, OP, GOMP_FLAG // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) // TYPE_ID, OP_ID, TYPE - detailed above // OP - operator // LCK_ID - lock identifier, used to possibly distinguish lock variable #define ATOMIC_CRITICAL_CPT_REV(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_CPT(TYPE_ID,OP_ID,TYPE,TYPE) \ TYPE new_value; \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ /*printf("__kmp_atomic_mode = %d\n", __kmp_atomic_mode);*/\ OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \ OP_CRITICAL_CPT_REV(OP,LCK_ID) \ } /* ------------------------------------------------------------------------- */ // routines for long double type ATOMIC_CRITICAL_CPT_REV( float10, sub_cpt_rev, long double, -, 10r, 1 ) // __kmpc_atomic_float10_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV( float10, div_cpt_rev, long double, /, 10r, 1 ) // __kmpc_atomic_float10_div_cpt_rev #if KMP_HAVE_QUAD // routines for _Quad type ATOMIC_CRITICAL_CPT_REV( float16, sub_cpt_rev, QUAD_LEGACY, -, 16r, 1 ) // __kmpc_atomic_float16_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV( float16, div_cpt_rev, QUAD_LEGACY, /, 16r, 1 ) // __kmpc_atomic_float16_div_cpt_rev #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_CPT_REV( float16, sub_a16_cpt_rev, Quad_a16_t, -, 16r, 1 ) // __kmpc_atomic_float16_sub_a16_cpt_rev ATOMIC_CRITICAL_CPT_REV( float16, div_a16_cpt_rev, Quad_a16_t, /, 16r, 1 ) // __kmpc_atomic_float16_div_a16_cpt_rev #endif #endif // routines for complex types // ------------------------------------------------------------------------ // Workaround 
for cmplx4. Regular routines with return value don't work // on Win_32e. Let's return captured values through the additional parameter. #define OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ if( flag ) { \ (*lhs) = (rhs) OP (*lhs); \ (*out) = (*lhs); \ } else { \ (*out) = (*lhs); \ (*lhs) = (rhs) OP (*lhs); \ } \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ OP_CRITICAL_CPT_REV_WRK( OP, 0 ); \ } #else #define OP_GOMP_CRITICAL_CPT_REV_WRK(OP,FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ #define ATOMIC_CRITICAL_CPT_REV_WRK(TYPE_ID,OP_ID,TYPE,OP,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_WRK(TYPE_ID,OP_ID,TYPE) \ OP_GOMP_CRITICAL_CPT_REV_WRK(OP,GOMP_FLAG) \ OP_CRITICAL_CPT_REV_WRK(OP,LCK_ID) \ } // The end of workaround for cmplx4 // !!! TODO: check if we need to return void for cmplx4 routines // cmplx4 routines to return void ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, sub_cpt_rev, kmp_cmplx32, -, 8c, 1 ) // __kmpc_atomic_cmplx4_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV_WRK( cmplx4, div_cpt_rev, kmp_cmplx32, /, 8c, 1 ) // __kmpc_atomic_cmplx4_div_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx8, sub_cpt_rev, kmp_cmplx64, -, 16c, 1 ) // __kmpc_atomic_cmplx8_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx8, div_cpt_rev, kmp_cmplx64, /, 16c, 1 ) // __kmpc_atomic_cmplx8_div_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx10, sub_cpt_rev, kmp_cmplx80, -, 20c, 1 ) // __kmpc_atomic_cmplx10_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx10, div_cpt_rev, kmp_cmplx80, /, 20c, 1 ) // __kmpc_atomic_cmplx10_div_cpt_rev #if KMP_HAVE_QUAD ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_cpt_rev, CPLX128_LEG, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx16, div_cpt_rev, CPLX128_LEG, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_cpt_rev #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_CPT_REV( cmplx16, sub_a16_cpt_rev, kmp_cmplx128_a16_t, -, 32c, 1 ) // __kmpc_atomic_cmplx16_sub_a16_cpt_rev ATOMIC_CRITICAL_CPT_REV( cmplx16, div_a16_cpt_rev, kmp_cmplx128_a16_t, /, 32c, 1 ) // __kmpc_atomic_cmplx16_div_a16_cpt_rev #endif #endif // Capture reverse for mixed type: RHS=float16 #if KMP_HAVE_QUAD // Beginning of a definition (provides name, parameters, gebug trace) // TYPE_ID - operands type and size (fixed*, fixed*u for signed, unsigned fixed) // OP_ID - operation identifier (add, sub, mul, ...) 
// TYPE - operands' type // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_CPT_REV_MIX(TYPE_ID,TYPE,OP_ID,BITS,OP,RTYPE_ID,RTYPE,LCK_ID,MASK,GOMP_FLAG) \ ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) \ OP_CMPXCHG_CPT_REV(TYPE,BITS,OP) \ } // ------------------------------------------------------------------------- #define ATOMIC_CRITICAL_CPT_REV_MIX(TYPE_ID,TYPE,OP_ID,OP,RTYPE_ID,RTYPE,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_CPT_MIX(TYPE_ID,OP_ID,TYPE,RTYPE_ID,RTYPE) \ TYPE new_value; \ OP_GOMP_CRITICAL_CPT_REV(OP,GOMP_FLAG) /* send assignment */ \ OP_CRITICAL_CPT_REV(OP,LCK_ID) /* send assignment */ \ } ATOMIC_CMPXCHG_CPT_REV_MIX( fixed1, char, sub_cpt_rev, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_sub_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( fixed1u, uchar, sub_cpt_rev, 8, -, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_sub_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( fixed1, char, div_cpt_rev, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_div_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( fixed1u, uchar, div_cpt_rev, 8, /, fp, _Quad, 1i, 0, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1u_div_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( fixed2, short, sub_cpt_rev, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_sub_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( fixed2u, ushort, sub_cpt_rev, 16, -, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_sub_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( fixed2, short, div_cpt_rev, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_div_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( fixed2u, ushort, div_cpt_rev, 16, /, fp, _Quad, 2i, 1, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2u_div_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( fixed4, kmp_int32, sub_cpt_rev, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_sub_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( fixed4u, kmp_uint32, sub_cpt_rev, 32, -, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_sub_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( fixed4, kmp_int32, div_cpt_rev, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4_div_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( fixed4u, kmp_uint32, div_cpt_rev, 32, /, fp, _Quad, 4i, 3, 0 ) // __kmpc_atomic_fixed4u_div_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( fixed8, kmp_int64, sub_cpt_rev, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_sub_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( fixed8u, kmp_uint64, sub_cpt_rev, 64, -, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_sub_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( fixed8, kmp_int64, div_cpt_rev, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_div_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( fixed8u, kmp_uint64, div_cpt_rev, 64, /, fp, _Quad, 8i, 7, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8u_div_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( float4, kmp_real32, sub_cpt_rev, 32, -, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_sub_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( float4, kmp_real32, div_cpt_rev, 32, /, fp, _Quad, 4r, 3, KMP_ARCH_X86 ) // __kmpc_atomic_float4_div_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( float8, kmp_real64, sub_cpt_rev, 64, -, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_sub_cpt_rev_fp ATOMIC_CMPXCHG_CPT_REV_MIX( float8, kmp_real64, div_cpt_rev, 64, /, fp, _Quad, 8r, 7, KMP_ARCH_X86 ) // __kmpc_atomic_float8_div_cpt_rev_fp ATOMIC_CRITICAL_CPT_REV_MIX( float10, long double, sub_cpt_rev, -, fp, _Quad, 10r, 1 ) // 
__kmpc_atomic_float10_sub_cpt_rev_fp ATOMIC_CRITICAL_CPT_REV_MIX( float10, long double, div_cpt_rev, /, fp, _Quad, 10r, 1 ) // __kmpc_atomic_float10_div_cpt_rev_fp #endif //KMP_HAVE_QUAD // OpenMP 4.0 Capture-write (swap): {v = x; x = expr;} #define ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ TYPE __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid )); #define CRITICAL_SWP(LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ old_value = (*lhs); \ (*lhs) = rhs; \ \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return old_value; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define GOMP_CRITICAL_SWP(FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ CRITICAL_SWP( 0 ); \ } #else #define GOMP_CRITICAL_SWP(FLAG) #endif /* KMP_GOMP_COMPAT */ #define ATOMIC_XCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \ ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ TYPE old_value; \ GOMP_CRITICAL_SWP(GOMP_FLAG) \ old_value = KMP_XCHG_FIXED##BITS( lhs, rhs ); \ return old_value; \ } // ------------------------------------------------------------------------ #define ATOMIC_XCHG_FLOAT_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \ ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ TYPE old_value; \ GOMP_CRITICAL_SWP(GOMP_FLAG) \ old_value = KMP_XCHG_REAL##BITS( lhs, rhs ); \ return old_value; \ } // ------------------------------------------------------------------------ #define CMPXCHG_SWP(TYPE,BITS) \ { \ TYPE KMP_ATOMIC_VOLATILE temp_val; \ TYPE old_value, new_value; \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs; \ while ( ! KMP_COMPARE_AND_STORE_ACQ##BITS( (kmp_int##BITS *) lhs, \ *VOLATILE_CAST(kmp_int##BITS *) &old_value, \ *VOLATILE_CAST(kmp_int##BITS *) &new_value ) ) \ { \ KMP_CPU_PAUSE(); \ \ temp_val = *lhs; \ old_value = temp_val; \ new_value = rhs; \ } \ return old_value; \ } // ------------------------------------------------------------------------- #define ATOMIC_CMPXCHG_SWP(TYPE_ID,TYPE,BITS,GOMP_FLAG) \ ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ TYPE old_value; \ GOMP_CRITICAL_SWP(GOMP_FLAG) \ CMPXCHG_SWP(TYPE,BITS) \ } ATOMIC_XCHG_SWP( fixed1, kmp_int8, 8, KMP_ARCH_X86 ) // __kmpc_atomic_fixed1_swp ATOMIC_XCHG_SWP( fixed2, kmp_int16, 16, KMP_ARCH_X86 ) // __kmpc_atomic_fixed2_swp ATOMIC_XCHG_SWP( fixed4, kmp_int32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_fixed4_swp ATOMIC_XCHG_FLOAT_SWP( float4, kmp_real32, 32, KMP_ARCH_X86 ) // __kmpc_atomic_float4_swp #if ( KMP_ARCH_X86 ) ATOMIC_CMPXCHG_SWP( fixed8, kmp_int64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_swp ATOMIC_CMPXCHG_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp #else ATOMIC_XCHG_SWP( fixed8, kmp_int64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_fixed8_swp ATOMIC_XCHG_FLOAT_SWP( float8, kmp_real64, 64, KMP_ARCH_X86 ) // __kmpc_atomic_float8_swp #endif // ------------------------------------------------------------------------ // Routines for Extended types: long double, _Quad, complex flavours (use critical section) #define ATOMIC_CRITICAL_SWP(TYPE_ID,TYPE,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_SWP(TYPE_ID,TYPE) \ TYPE old_value; \ GOMP_CRITICAL_SWP(GOMP_FLAG) \ CRITICAL_SWP(LCK_ID) \ } // ------------------------------------------------------------------------ // !!! TODO: check if we need to return void for cmplx4 routines // Workaround for cmplx4. Regular routines with return value don't work // on Win_32e. 
Let's return captured values through the additional parameter. #define ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \ void __kmpc_atomic_##TYPE_ID##_swp( ident_t *id_ref, int gtid, TYPE * lhs, TYPE rhs, TYPE * out ) \ { \ KMP_DEBUG_ASSERT( __kmp_init_serial ); \ KA_TRACE(100,("__kmpc_atomic_" #TYPE_ID "_swp: T#%d\n", gtid )); #define CRITICAL_SWP_WRK(LCK_ID) \ __kmp_acquire_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ \ tmp = (*lhs); \ (*lhs) = (rhs); \ (*out) = tmp; \ __kmp_release_atomic_lock( & ATOMIC_LOCK##LCK_ID, gtid ); \ return; // ------------------------------------------------------------------------ #ifdef KMP_GOMP_COMPAT #define GOMP_CRITICAL_SWP_WRK(FLAG) \ if ( (FLAG) && (__kmp_atomic_mode == 2) ) { \ KMP_CHECK_GTID; \ CRITICAL_SWP_WRK( 0 ); \ } #else #define GOMP_CRITICAL_SWP_WRK(FLAG) #endif /* KMP_GOMP_COMPAT */ // ------------------------------------------------------------------------ #define ATOMIC_CRITICAL_SWP_WRK(TYPE_ID, TYPE,LCK_ID,GOMP_FLAG) \ ATOMIC_BEGIN_SWP_WRK(TYPE_ID,TYPE) \ TYPE tmp; \ GOMP_CRITICAL_SWP_WRK(GOMP_FLAG) \ CRITICAL_SWP_WRK(LCK_ID) \ } // The end of workaround for cmplx4 ATOMIC_CRITICAL_SWP( float10, long double, 10r, 1 ) // __kmpc_atomic_float10_swp #if KMP_HAVE_QUAD ATOMIC_CRITICAL_SWP( float16, QUAD_LEGACY, 16r, 1 ) // __kmpc_atomic_float16_swp #endif // cmplx4 routine to return void ATOMIC_CRITICAL_SWP_WRK( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp //ATOMIC_CRITICAL_SWP( cmplx4, kmp_cmplx32, 8c, 1 ) // __kmpc_atomic_cmplx4_swp ATOMIC_CRITICAL_SWP( cmplx8, kmp_cmplx64, 16c, 1 ) // __kmpc_atomic_cmplx8_swp ATOMIC_CRITICAL_SWP( cmplx10, kmp_cmplx80, 20c, 1 ) // __kmpc_atomic_cmplx10_swp #if KMP_HAVE_QUAD ATOMIC_CRITICAL_SWP( cmplx16, CPLX128_LEG, 32c, 1 ) // __kmpc_atomic_cmplx16_swp #if ( KMP_ARCH_X86 ) ATOMIC_CRITICAL_SWP( float16_a16, Quad_a16_t, 16r, 1 ) // __kmpc_atomic_float16_a16_swp ATOMIC_CRITICAL_SWP( cmplx16_a16, kmp_cmplx128_a16_t, 32c, 1 ) // __kmpc_atomic_cmplx16_a16_swp #endif #endif // End of OpenMP 4.0 Capture #endif //OMP_40_ENABLED #endif //KMP_ARCH_X86 || KMP_ARCH_X86_64 #undef OP_CRITICAL /* ------------------------------------------------------------------------ */ /* Generic atomic routines */ /* ------------------------------------------------------------------------ */ void __kmpc_atomic_1( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); if ( #if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT) FALSE /* must use lock */ #else TRUE #endif ) { kmp_int8 old_value, new_value; old_value = *(kmp_int8 *) lhs; (*f)( &new_value, &old_value, rhs ); /* TODO: Should this be acquire or release? */ while ( ! KMP_COMPARE_AND_STORE_ACQ8 ( (kmp_int8 *) lhs, *(kmp_int8 *) &old_value, *(kmp_int8 *) &new_value ) ) { KMP_CPU_PAUSE(); old_value = *(kmp_int8 *) lhs; (*f)( &new_value, &old_value, rhs ); } return; } else { // // All 1-byte data is of integer data type. 
// #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_1i, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_1i, gtid ); } } void __kmpc_atomic_2( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { if ( #if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT) FALSE /* must use lock */ #elif KMP_ARCH_X86 || KMP_ARCH_X86_64 TRUE /* no alignment problems */ #else ! ( (kmp_uintptr_t) lhs & 0x1) /* make sure address is 2-byte aligned */ #endif ) { kmp_int16 old_value, new_value; old_value = *(kmp_int16 *) lhs; (*f)( &new_value, &old_value, rhs ); /* TODO: Should this be acquire or release? */ while ( ! KMP_COMPARE_AND_STORE_ACQ16 ( (kmp_int16 *) lhs, *(kmp_int16 *) &old_value, *(kmp_int16 *) &new_value ) ) { KMP_CPU_PAUSE(); old_value = *(kmp_int16 *) lhs; (*f)( &new_value, &old_value, rhs ); } return; } else { // // All 2-byte data is of integer data type. // #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_2i, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_2i, gtid ); } } void __kmpc_atomic_4( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); if ( // // FIXME: On IA-32 architecture, gcc uses cmpxchg only for 4-byte ints. // Gomp compatibility is broken if this routine is called for floats. // #if KMP_ARCH_X86 || KMP_ARCH_X86_64 TRUE /* no alignment problems */ #else ! ( (kmp_uintptr_t) lhs & 0x3) /* make sure address is 4-byte aligned */ #endif ) { kmp_int32 old_value, new_value; old_value = *(kmp_int32 *) lhs; (*f)( &new_value, &old_value, rhs ); /* TODO: Should this be acquire or release? */ while ( ! KMP_COMPARE_AND_STORE_ACQ32 ( (kmp_int32 *) lhs, *(kmp_int32 *) &old_value, *(kmp_int32 *) &new_value ) ) { KMP_CPU_PAUSE(); old_value = *(kmp_int32 *) lhs; (*f)( &new_value, &old_value, rhs ); } return; } else { // // Use __kmp_atomic_lock_4i for all 4-byte data, // even if it isn't of integer data type. // #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_4i, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_4i, gtid ); } } void __kmpc_atomic_8( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); if ( #if KMP_ARCH_X86 && defined(KMP_GOMP_COMPAT) FALSE /* must use lock */ #elif KMP_ARCH_X86 || KMP_ARCH_X86_64 TRUE /* no alignment problems */ #else ! ( (kmp_uintptr_t) lhs & 0x7) /* make sure address is 8-byte aligned */ #endif ) { kmp_int64 old_value, new_value; old_value = *(kmp_int64 *) lhs; (*f)( &new_value, &old_value, rhs ); /* TODO: Should this be acquire or release? 
*/ while ( ! KMP_COMPARE_AND_STORE_ACQ64 ( (kmp_int64 *) lhs, *(kmp_int64 *) &old_value, *(kmp_int64 *) &new_value ) ) { KMP_CPU_PAUSE(); old_value = *(kmp_int64 *) lhs; (*f)( &new_value, &old_value, rhs ); } return; } else { // // Use __kmp_atomic_lock_8i for all 8-byte data, // even if it isn't of integer data type. // #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_8i, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_8i, gtid ); } } void __kmpc_atomic_10( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_10r, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_10r, gtid ); } void __kmpc_atomic_16( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_16c, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_16c, gtid ); } void __kmpc_atomic_20( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_20c, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_20c, gtid ); } void __kmpc_atomic_32( ident_t *id_ref, int gtid, void* lhs, void* rhs, void (*f)( void *, void *, void * ) ) { KMP_DEBUG_ASSERT( __kmp_init_serial ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_acquire_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_acquire_atomic_lock( & __kmp_atomic_lock_32c, gtid ); (*f)( lhs, lhs, rhs ); #ifdef KMP_GOMP_COMPAT if ( __kmp_atomic_mode == 2 ) { __kmp_release_atomic_lock( & __kmp_atomic_lock, gtid ); } else #endif /* KMP_GOMP_COMPAT */ __kmp_release_atomic_lock( & __kmp_atomic_lock_32c, gtid ); } // AC: same two routines as GOMP_atomic_start/end, but will be called by our compiler // duplicated in order to not use 3-party names in pure Intel code // TODO: consider adding GTID parameter after consultation with Ernesto/Xinmin. 
void __kmpc_atomic_start(void)
{
    int gtid = __kmp_entry_gtid();
    KA_TRACE(20, ("__kmpc_atomic_start: T#%d\n", gtid));
    __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
}

void __kmpc_atomic_end(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("__kmpc_atomic_end: T#%d\n", gtid));
    __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
}

/* ------------------------------------------------------------------------ */
/* ------------------------------------------------------------------------ */

/*! @} */

// end of file
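//
// A minimal illustrative sketch (not part of the runtime sources): one way a
// call site could drive the generic entry point __kmpc_atomic_8 defined above.
// The combiner callback receives (new, old, rhs) pointers and must write
// op(*old, *rhs) into *new; the runtime either retries the combiner inside a
// compare-and-store loop or runs it under __kmp_atomic_lock_8i.
// The helper names below are hypothetical, and "double occupies 8 bytes on
// the target" is an assumption of this sketch.
//
static void
example_add_double_combiner( void *new_val, void *old_val, void *rhs )
{
    // *new = *old + *rhs; reads complete before the store, so this also works
    // in the lock path where new_val and old_val alias the same location.
    *(double *) new_val = *(double *) old_val + *(double *) rhs;
}

// Roughly what an atomic update "x += v" on a double could be lowered to when
// no type-specific __kmpc_atomic_* routine is selected by the compiler.
static void
example_atomic_add_double( ident_t *id_ref, int gtid, double *x, double v )
{
    __kmpc_atomic_8( id_ref, gtid, x, &v, example_add_double_combiner );
}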
cryptocontext.h
/** * @file cryptocontext.h -- Control for encryption operations. * @author TPOC: [email protected] * * @section LICENSE * * Copyright (c) 2017, New Jersey Institute of Technology (NJIT) * All rights reserved. * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SRC_DEMO_PRE_CRYPTOCONTEXT_H_ #define SRC_DEMO_PRE_CRYPTOCONTEXT_H_ #include "palisade.h" #include "cryptocontexthelper.h" #include "cryptotiming.h" namespace lbcrypto { template<typename Element> class CryptoContextFactory; template<typename Element> class CryptoContextImpl; template<typename Element> using CryptoContext = shared_ptr<CryptoContextImpl<Element>>; /** * @brief CryptoContextImpl * * A CryptoContextImpl is the object used to access the PALISADE library * * All PALISADE functionality is accessed by way of an instance of a CryptoContextImpl; we say that various objects are * "created in" a context, and can only be used in the context in which they were created * * All PALISADE methods are accessed through CryptoContextImpl methods. 
Guards are implemented to make certain that * only valid objects that have been created in the context are used * * Contexts are created using the CryptoContextFactory, and can be serialized and recovered from a serialization */ template<typename Element> class CryptoContextImpl : public Serializable { friend class CryptoContextFactory<Element>; private: shared_ptr<LPCryptoParameters<Element>> params; /*!< crypto parameters used for this context */ shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme; /*!< algorithm used; accesses all crypto methods */ static std::map<string,std::vector<LPEvalKey<Element>>> evalMultKeyMap; /*!< cached evalmult keys, by secret key UID */ static std::map<string,shared_ptr<std::map<usint,LPEvalKey<Element>>>> evalSumKeyMap; /*!< cached evalsum keys, by secret key UID */ bool doTiming; vector<TimingInfo>* timeSamples; /** * Private methods to compare two contexts; this is only used internally and is not generally available * @param a - shared pointer in the object * @param b - this object, usually * @return true if the shared pointer is a pointer to "this" */ friend bool operator==(const CryptoContext<Element>& a, const CryptoContext<Element>& b) { if( a->params.get() != b->params.get() ) return false; return true; } friend bool operator!=(const CryptoContext<Element>& a, const CryptoContext<Element>& b) { return !( a == b ); } /** * Private methods to compare two contexts; this is only used internally and is not generally available * @param a - shared pointer in the object * @param b - this object, usually * @return true if the shared pointer is a pointer to "this" */ friend bool operator==(const CryptoContextImpl<Element>& a, const CryptoContextImpl<Element>& b) { if( a.params.get() != b.params.get() ) return false; return true; } friend bool operator!=(const CryptoContextImpl<Element>& a, const CryptoContextImpl<Element>& b) { return !( a == b ); } /** * TypeCheck makes sure that an operation between two ciphertexts is permitted * @param a * @param b */ void TypeCheck(const Ciphertext<Element> a, const Ciphertext<Element> b) const { if( a == NULL || b == NULL ) PALISADE_THROW( type_error, "Null Ciphertext"); if( a->GetCryptoContext().get() != this ) PALISADE_THROW( type_error, "Ciphertext was not created in this CryptoContextImpl"); if( a->GetCryptoContext() != b->GetCryptoContext() ) PALISADE_THROW( type_error, "Ciphertexts were not created in the same CryptoContextImpl"); if( a->GetKeyTag() != b->GetKeyTag() ) PALISADE_THROW( type_error, "Ciphertexts were not encrypted with same keys" ); if( a->GetEncodingType() != b->GetEncodingType() ) PALISADE_THROW( type_error, "Ciphertext encoding types do not match"); } /** * TypeCheck makes sure that an operation between a ciphertext and a plaintext is permitted * @param a * @param b */ void TypeCheck(const Ciphertext<Element> a, const Plaintext b) const { if( a == NULL ) PALISADE_THROW( type_error, "Null Ciphertext"); if( b == NULL ) PALISADE_THROW( type_error, "Null Plaintext"); if( a->GetCryptoContext().get() != this ) PALISADE_THROW( type_error, "Ciphertext was not created in this CryptoContextImpl"); if( a->GetEncodingType() != b->GetEncodingType() ) PALISADE_THROW( type_error, "Ciphertext and Plaintext encoding types do not match"); } /** * TypeCheck makes sure that an operation between two ciphertexts is permitted * @param a * @param b */ void TypeCheck(const RationalCiphertext<Element>& a, const RationalCiphertext<Element>& b) const { if( a.GetCryptoContext().get() != this ) PALISADE_THROW( type_error, 
"Ciphertext was not created in this CryptoContextImpl"); if( a.GetCryptoContext() != b.GetCryptoContext() ) PALISADE_THROW( type_error, "Ciphertexts were not created in the same CryptoContextImpl"); if( a.GetKeyTag() != b.GetKeyTag() ) PALISADE_THROW( type_error, "Ciphertexts were not encrypted with same keys" ); if( a.GetNumerator()->GetEncodingType() != b.GetNumerator()->GetEncodingType() ) PALISADE_THROW( type_error, "Ciphertext encoding types do not match"); } /** * TypeCheck makes sure that an operation between a ciphertext and a plaintext is permitted * @param a * @param b */ void TypeCheck(const RationalCiphertext<Element>& a, const Plaintext b) const { if( b == NULL ) PALISADE_THROW( type_error, "Null Plaintext"); if( a.GetCryptoContext().get() != this ) PALISADE_THROW( type_error, "Ciphertext was not created in this CryptoContextImpl"); if( a.GetNumerator()->GetEncodingType() != b->GetEncodingType() ) PALISADE_THROW( type_error, "Ciphertext and Plaintext encoding types do not match"); } bool Mismatched(const CryptoContext<Element> a) const { if( a.get() != this ) { return true; } return false; } public: /** * CryptoContextImpl constructor from pointers to parameters and scheme * @param params - pointer to CryptoParameters * @param scheme - pointer to Crypto Scheme */ CryptoContextImpl(LPCryptoParameters<Element> *params = 0, LPPublicKeyEncryptionScheme<Element> *scheme = 0) { this->params.reset(params); this->scheme.reset(scheme); this->doTiming = false; this->timeSamples = 0; } /** * CryptoContextImpl constructor from shared pointers to parameters and scheme * @param params - shared pointer to CryptoParameters * @param scheme - sharedpointer to Crypto Scheme */ CryptoContextImpl(shared_ptr<LPCryptoParameters<Element>> params, shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme) { this->params = params; this->scheme = scheme; this->doTiming = false; this->timeSamples = 0; } /** * Copy constructor * @param c - source */ CryptoContextImpl(const CryptoContextImpl<Element>& c) { params = c.params; scheme = c.scheme; doTiming = c.doTiming; timeSamples = c.timeSamples; } /** * Assignment * @param rhs - assigning from * @return this */ CryptoContextImpl<Element>& operator=(const CryptoContextImpl<Element>& rhs) { params = rhs.params; scheme = rhs.scheme; doTiming = rhs.doTiming; timeSamples = rhs.timeSamples; return *this; } /** * A CryptoContextImpl is only valid if the shared pointers are both valid */ operator bool() const { return bool(params) && bool(scheme); } // TIMING METHODS /** * StartTiming method activates timing of CryptoMethods * * @param timeSamples points to a vector in which timing samples will be stored */ void StartTiming(vector<TimingInfo>* timeSamples) { this->timeSamples = timeSamples; doTiming = true; } /* * StopTiming - turns off timing */ void StopTiming() { doTiming = false; } /** * ResumeTiming - re-enables timing with existing TimingInfo vector */ void ResumeTiming() { doTiming = true; } /** * ResetTiming - erases measurements */ void ResetTiming() { this->timeSamples->clear(); } // SERIALIZATION METHODS /** * Serialize the CryptoContextImpl * * @param serObj - rapidJson object for the serializaion * @return true on success */ bool Serialize(Serialized* serObj) const; /** * Deserialize the context AND initialize the algorithm * * @param serObj * @return true on success */ bool Deserialize(const Serialized& serObj) { throw std::logic_error("Deserialize by using CryptoContextFactory::DeserializeAndCreateContext"); } /** * SerializeEvalMultKey for all 
EvalMult keys * method will serialize each CryptoContextImpl only once * * @param serObj - serialization * @return true on success */ static bool SerializeEvalMultKey(Serialized* serObj); /** * SerializeEvalMultKey for a single EvalMult key * method will serialize entire key AND cryptocontext * * @param serObj - serialization * @param id for key to serialize * @return true on success (false on failure or key id not found) */ static bool SerializeEvalMultKey(Serialized* serObj, const string& id); /** * SerializeEvalMultKey for all EvalMultKeys made in a given context * method will serialize the context only once * * @param serObj - serialization * @param cc whose keys should be serialized * @return true on success (false on failure or no keys found) */ static bool SerializeEvalMultKey(Serialized* serObj, const CryptoContext<Element> cc); /** * DeserializeEvalMultKey deserialize all keys in the serialization * deserialized keys silently replace any existing matching keys * deserialization will create CryptoContextImpl if necessary * * @param serObj - serialization * @return true on success */ static bool DeserializeEvalMultKey(const Serialized& serObj); /** * ClearEvalMultKeys - flush EvalMultKey cache */ static void ClearEvalMultKeys(); /** * ClearEvalMultKeys - flush EvalMultKey cache for a given id * @param id */ static void ClearEvalMultKeys(const string& id); /** * ClearEvalMultKeys - flush EvalMultKey cache for a given context * @param cc */ static void ClearEvalMultKeys(const CryptoContext<Element> cc); /** * InsertEvalMultKey - add the given vector of keys to the map, replacing the existing vector if there * @param vectorToInsert */ static void InsertEvalMultKey(const std::vector<LPEvalKey<Element>>& vectorToInsert); /** * SerializeEvalSumKey for all EvalSum keys * method will serialize each CryptoContextImpl only once * * @param serObj - serialization * @return true on success */ static bool SerializeEvalSumKey(Serialized* serObj); /** * SerializeEvalSumKey for a single EvalSum key * method will serialize entire key AND cryptocontext * * @param serObj - serialization * @param id for key to serialize * @return true on success (false on failure or key id not found) */ static bool SerializeEvalSumKey(Serialized* serObj, const string& id); /** * SerializeEvalSumKey for all EvalSumKeys made in a given context * method will serialize the context only once * * @param serObj - serialization * @param cc whose keys should be serialized * @return true on success (false on failure or no keys found) */ static bool SerializeEvalSumKey(Serialized* serObj, const CryptoContext<Element> cc); /** * DeserializeEvalSumKey deserialize all keys in the serialization * deserialized keys silently replace any existing matching keys * deserialization will create CryptoContextImpl if necessary * * @param serObj - serialization * @return true on success */ static bool DeserializeEvalSumKey(const Serialized& serObj); /** * ClearEvalSumKeys - flush EvalSumKey cache */ static void ClearEvalSumKeys(); /** * ClearEvalSumKeys - flush EvalSumKey cache for a given id * @param id */ static void ClearEvalSumKeys(const string& id); /** * ClearEvalSumKeys - flush EvalSumKey cache for a given context * @param cc */ static void ClearEvalSumKeys(const CryptoContext<Element> cc); /** * InsertEvalSumKey - add the given map of keys to the map, replacing the existing map if there * @param mapToInsert */ static void InsertEvalSumKey(const shared_ptr<std::map<usint,LPEvalKey<Element>>> mapToInsert); // TURN FEATURES ON /** * Enable 
a particular feature for use with this CryptoContextImpl * @param feature - the feature that should be enabled */ void Enable(PKESchemeFeature feature) { scheme->Enable(feature); } /** * Enable several features at once * @param featureMask - bitwise or of several PKESchemeFeatures */ void Enable(usint featureMask) { scheme->Enable(featureMask); } // GETTERS /** * Getter for Scheme * @return scheme */ const shared_ptr<LPPublicKeyEncryptionScheme<Element>> GetEncryptionAlgorithm() const { return scheme; } /** * Getter for CryptoParams * @return params */ const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const { return params; } /** * Getter for element params * @return */ const shared_ptr<typename Element::Params> GetElementParams() const { return params->GetElementParams(); } /** * Getter for encoding params * @return */ const EncodingParams GetEncodingParams() const { return params->GetEncodingParams(); } /** * Get the cyclotomic order used for this context * * @return */ const usint GetCyclotomicOrder() const { return params->GetElementParams()->GetCyclotomicOrder(); } /** * Get the ring dimension used for this context * * @return */ const usint GetRingDimension() const { return params->GetElementParams()->GetRingDimension(); } /** * Get the ciphertext modulus used for this context * * @return */ const typename Element::Integer& GetModulus() const { return params->GetElementParams()->GetModulus(); } /** * Get the ciphertext modulus used for this context * * @return */ const typename Element::Integer& GetRootOfUnity() const { return params->GetElementParams()->GetRootOfUnity(); } /** * KeyGen generates a key pair using this algorithm's KeyGen method * @return a public/secret key pair */ LPKeyPair<Element> KeyGen() { double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->KeyGen(CryptoContextFactory<Element>::GetContextForPointer(this), false); if( doTiming ) { timeSamples->push_back( TimingInfo(OpKeyGen, currentDateTime() - start) ); } return r; } /** * KeyGen generates a Multiparty key pair using this algorithm's KeyGen method from two keys * @param pk first public key used to coordinate the creation of later public keys. * @return a public/secret key pair */ LPKeyPair<Element> MultipartyKeyGen( const LPPublicKey<Element> pk, bool makeSparse=false, bool pre=false) { double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->MultipartyKeyGen(CryptoContextFactory<Element>::GetContextForPointer(this), pk, makeSparse, pre); if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyKeyGenKey, currentDateTime() - start) ); } return r; } /** * KeyGen generates a Multiparty key pair using a vector of secret keys * @param secretKeys a vector of the secret keys to be used for multiparty computation. * @return a public/secret key pair */ LPKeyPair<Element> MultipartyKeyGen( const vector<LPPrivateKey<Element>>& secretKeys) { double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->MultipartyKeyGen(CryptoContextFactory<Element>::GetContextForPointer(this), secretKeys, false); if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyKeyGenKeyvec, currentDateTime() - start) ); } return r; } /** * Lead Multiparty Decryption method for PALISADE multiparty operations. * This should be performed by exactly one of the clients. * All other clients should perform the MultipartyDecryptMain operation. 
* @param privateKey the secret key of the lead decryption client * @param ciphertext vector of encrypted ciphertext * @return vector of partially decrypted ciphertexts */ vector<Ciphertext<Element>> MultipartyDecryptLead( const LPPrivateKey<Element> privateKey, const vector<Ciphertext<Element>>& ciphertext) const { if( privateKey == NULL || Mismatched(privateKey->GetCryptoContext()) ) throw std::logic_error("Information passed to MultipartyDecryptLead was not generated with this crypto context"); vector<Ciphertext<Element>> newCiphertext; double start = 0; if( doTiming ) start = currentDateTime(); for( size_t i = 0; i < ciphertext.size(); i++ ) { if( ciphertext[i] == NULL || Mismatched(ciphertext[i]->GetCryptoContext()) ) throw std::logic_error("A ciphertext passed to MultipartyDecryptLead was not generated with this crypto context"); newCiphertext.push_back( GetEncryptionAlgorithm()->MultipartyDecryptLead(privateKey, ciphertext[i]) ); } if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyDecryptLead, currentDateTime() - start) ); } return newCiphertext; } /** * Multiparty decryption method for PALISADE multiparty operations. * The lead multiparty decryption operation should be performed by exactly one of the clients. * All other clients should perform this MultipartyDecryptMain operation. * @param privateKey - for decryption * @param ciphertext - vector of encrypted ciphertext * @return vector of partially decrypted ciphertexts */ vector<Ciphertext<Element>> MultipartyDecryptMain( const LPPrivateKey<Element> privateKey, const vector<Ciphertext<Element>>& ciphertext) const { if( privateKey == NULL || Mismatched(privateKey->GetCryptoContext()) ) throw std::logic_error("Information passed to MultipartyDecryptMain was not generated with this crypto context"); vector<Ciphertext<Element>> newCiphertext; double start = 0; if( doTiming ) start = currentDateTime(); for( size_t i = 0; i < ciphertext.size(); i++ ) { if( ciphertext[i] == NULL || Mismatched(ciphertext[i]->GetCryptoContext()) ) throw std::logic_error("A ciphertext passed to MultipartyDecryptMain was not generated with this crypto context"); newCiphertext.push_back( GetEncryptionAlgorithm()->MultipartyDecryptMain(privateKey, ciphertext[i]) ); } if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyDecryptMain, currentDateTime() - start) ); } return newCiphertext; } /** * Final multiparty decryption method to fuse the partially decrypted ciphertexts into a decrypted plaintext. * The lead multiparty decryption operation should be performed by exactly one of the clients. * All other clients should perform the MultipartyDecryptMain operation. * @param partialCiphertextVec - vector of partially decrypted ciphertexts. * @param plaintext - pointer to destination for the result of decryption * @param doPadding - true if input plaintext was padded; causes unpadding on last piece of ciphertext * @return size of plaintext */ DecryptResult MultipartyDecryptFusion( const vector<Ciphertext<Element>>& partialCiphertextVec, Plaintext *plaintext) const { DecryptResult result; //Make sure we're processing ciphertexts. 
size_t last_ciphertext = partialCiphertextVec.size(); if ( last_ciphertext < 1 ) return result; double start = 0; if( doTiming ) start = currentDateTime(); for( size_t i = 0; i < last_ciphertext; i++ ) { if (partialCiphertextVec[i] == NULL || Mismatched(partialCiphertextVec[i]->GetCryptoContext())) throw std::logic_error("A ciphertext passed to MultipartyDecryptFusion was not generated with this crypto context"); if (partialCiphertextVec[i]->GetEncodingType() != partialCiphertextVec[0]->GetEncodingType()) throw std::logic_error("Ciphertexts passed to MultipartyDecryptFusion have mismatched encoding types"); } // determine which type of plaintext that you need to decrypt into Plaintext decrypted = GetPlaintextForDecrypt(partialCiphertextVec[0]->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); result = GetEncryptionAlgorithm()->MultipartyDecryptFusion(partialCiphertextVec, &decrypted->GetElement<NativePoly>()); if (result.isValid == false) return result; decrypted->Decode(); *plaintext = decrypted; if( doTiming ) { timeSamples->push_back( TimingInfo(OpMultiPartyDecryptFusion, currentDateTime() - start) ); } return result; } /** * SparseKeyGen generates a key pair with special structure, and without full entropy, * for use in special cases like Ring Reduction * @return a public/secret key pair */ LPKeyPair<Element> SparseKeyGen() { double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->KeyGen(CryptoContextFactory<Element>::GetContextForPointer(this), true); if( doTiming ) { timeSamples->push_back( TimingInfo(OpSparseKeyGen, currentDateTime() - start) ); } return r; } /** * ReKeyGen produces an Eval Key that PALISADE can use for Proxy Re Encryption * @param newKey (public) * @param oldKey (private) * @return new evaluation key */ LPEvalKey<Element> ReKeyGen( const LPPublicKey<Element> newKey, const LPPrivateKey<Element> oldKey) const { if( newKey == NULL || oldKey == NULL || Mismatched(newKey->GetCryptoContext()) || Mismatched(oldKey->GetCryptoContext()) ) throw std::logic_error("Keys passed to ReKeyGen were not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->ReKeyGen(newKey, oldKey); if( doTiming ) { timeSamples->push_back( TimingInfo(OpReKeyGenPubPri, currentDateTime() - start) ); } return r; } /** * ReKeyGen produces an Eval Key that PALISADE can use for Proxy Re Encryption * @param newKey (private) * @param oldKey (private) * @return new evaluation key */ LPEvalKey<Element> ReKeyGen( const LPPrivateKey<Element> newKey, const LPPrivateKey<Element> oldKey) const { if (newKey == NULL || oldKey == NULL || Mismatched(newKey->GetCryptoContext()) || Mismatched(oldKey->GetCryptoContext()) ) throw std::logic_error("Keys passed to ReKeyGen were not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->ReKeyGen(newKey, oldKey); if( doTiming ) { timeSamples->push_back( TimingInfo(OpReKeyGenPriPri, currentDateTime() - start) ); } return r; } /** * EvalMultKeyGen creates a key that can be used with the PALISADE EvalMult operator * @param key * @return new evaluation key */ void EvalMultKeyGen(const LPPrivateKey<Element> key); /** * EvalMultsKeyGen creates a vector evalmult keys that can be used with the PALISADE EvalMult operator * 1st key (for s^2) is used for multiplication of ciphertexts of depth 1 * 2nd key (for s^3) is used for multiplication of ciphertexts of depth 2, etc. 
* * @param key * @return a vector of evaluation keys */ void EvalMultKeysGen(const LPPrivateKey<Element> key); /** * GetEvalMultKeyVector fetches the eval mult keys for a given KeyID * @param keyID * @return key vector from ID */ static const vector<LPEvalKey<Element>>& GetEvalMultKeyVector(const string& keyID); /** * GetEvalMultKeys * @return map of all the keys */ static const std::map<string,std::vector<LPEvalKey<Element>>>& GetAllEvalMultKeys(); /** * KeySwitchGen creates a key that can be used with the PALISADE KeySwitch operation * @param key1 * @param key2 * @return new evaluation key */ LPEvalKey<Element> KeySwitchGen( const LPPrivateKey<Element> key1, const LPPrivateKey<Element> key2) const { if( key1 == NULL || key2 == NULL || Mismatched(key1->GetCryptoContext()) || Mismatched(key2->GetCryptoContext()) ) throw std::logic_error("Keys passed to KeySwitchGen were not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto r = GetEncryptionAlgorithm()->KeySwitchGen(key1, key2); if( doTiming ) { timeSamples->push_back( TimingInfo(OpKeySwitchGen, currentDateTime() - start) ); } return r; } /** * Encrypt a plaintext using a given public key * @param publicKey * @param plaintext * @return ciphertext (or null on failure) */ Ciphertext<Element> Encrypt( const LPPublicKey<Element> publicKey, Plaintext plaintext) { if( publicKey == NULL ) throw std::logic_error("null key passed to Encrypt"); if( plaintext == NULL ) throw std::logic_error("null plaintext passed to Encrypt"); if( Mismatched(publicKey->GetCryptoContext()) ) throw std::logic_error("key passed to Encrypt was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(publicKey, plaintext->GetEncodedElement<Element>()); if (ciphertext) { ciphertext->SetEncodingType( plaintext->GetEncodingType() ); } if( doTiming ) { timeSamples->push_back( TimingInfo(OpEncryptPub, currentDateTime() - start) ); } return ciphertext; } /** * Encrypt a plaintext using a given private key * @param privateKey * @param plaintext * @return ciphertext (or null on failure) */ Ciphertext<Element> Encrypt( const LPPrivateKey<Element> privateKey, Plaintext plaintext) const { if( privateKey == NULL || Mismatched(privateKey->GetCryptoContext()) ) throw std::logic_error("key passed to Encrypt was not generated with this crypto context"); if( plaintext == NULL ) throw std::logic_error("null plaintext passed to Encrypt"); double start = 0; if( doTiming ) start = currentDateTime(); Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(privateKey, plaintext->GetEncodedElement<Element>()); if (ciphertext) { ciphertext->SetEncodingType( plaintext->GetEncodingType() ); } if( doTiming ) { timeSamples->push_back( TimingInfo(OpEncryptPriv, currentDateTime() - start) ); } return ciphertext; } /** * Encrypt a matrix of Plaintext * @param publicKey - for encryption * @param plaintext - to encrypt * @param doEncryption encrypts if true, embeds (encodes) the plaintext into cryptocontext if false * @return a vector of pointers to Ciphertexts created by encrypting the plaintext */ shared_ptr<Matrix<RationalCiphertext<Element>>> EncryptMatrix( const LPPublicKey<Element> publicKey, Matrix<Plaintext> &plaintext) { if (publicKey == NULL || Mismatched(publicKey->GetCryptoContext())) throw std::logic_error("key passed to EncryptMatrix was not generated with this crypto context"); auto zeroAlloc = [=]() { return 
make_unique<RationalCiphertext<Element>>(publicKey->GetCryptoContext(), true); }; shared_ptr<Matrix<RationalCiphertext<Element>>> cipherResults(new Matrix<RationalCiphertext<Element>> (zeroAlloc, plaintext.GetRows(), plaintext.GetCols())); double start = 0; if( doTiming ) start = currentDateTime(); for (size_t row = 0; row < plaintext.GetRows(); row++) { for (size_t col = 0; col < plaintext.GetCols(); col++) { if( plaintext(row,col)->Encode() == false ) return 0; Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(publicKey, plaintext(row,col)->GetElement<Element>()); if (ciphertext) { ciphertext->SetEncodingType( plaintext(row,col)->GetEncodingType() ); } (*cipherResults)(row, col).SetNumerator(ciphertext); } } if( doTiming ) { timeSamples->push_back( TimingInfo(OpEncryptMatrixPlain, currentDateTime() - start) ); } return cipherResults; } /** * Perform an encryption by reading plaintext from a stream, serializing each piece of ciphertext, * and writing the serializations to an output stream * @param publicKey - the encryption key in use * @param instream - where to read the input from * @param ostream - where to write the serialization to * @param doEncryption encrypts if true, embeds (encodes) the plaintext into cryptocontext if false * @return */ void EncryptStream( const LPPublicKey<Element> publicKey, std::istream& instream, std::ostream& outstream) const { // NOTE timing this operation is not supported if( publicKey == NULL || Mismatched(publicKey->GetCryptoContext()) ) throw std::logic_error("key passed to EncryptStream was not generated with this crypto context"); bool padded = false; Plaintext px; size_t chunkSize = this->GetRingDimension(); char *ptxt = new char[chunkSize]; while (instream.good()) { instream.read(ptxt, chunkSize); size_t nRead = instream.gcount(); if (nRead <= 0 && padded) break; px = this->MakeStringPlaintext(std::string(ptxt,nRead)); if (nRead < chunkSize) { padded = true; } Ciphertext<Element> ciphertext = GetEncryptionAlgorithm()->Encrypt(publicKey, px->GetEncodedElement<Element>()); if (!ciphertext) { break; } ciphertext->SetEncodingType( px->GetEncodingType() ); Serialized cS; if (ciphertext->Serialize(&cS)) { if (!SerializableHelper::SerializationToStream(cS, outstream)) { break; } } else { break; } } delete [] ptxt; return; } // PLAINTEXT FACTORY METHODS /** * MakeScalarPlaintext constructs a ScalarEncoding in this context * @param value * @param isSigned * @return plaintext */ Plaintext MakeScalarPlaintext(int64_t value) const { return Plaintext( new ScalarEncoding( this->GetElementParams(), this->GetEncodingParams(), value ) ); } /** * MakeStringPlaintext constructs a StringEncoding in this context * @param str * @return plaintext */ Plaintext MakeStringPlaintext(const string& str) const { return Plaintext( new StringEncoding( this->GetElementParams(), this->GetEncodingParams(), str ) ); } /** * MakeIntegerPlaintext constructs an IntegerEncoding in this context * @param value * @return plaintext */ Plaintext MakeIntegerPlaintext(int64_t value) const { return Plaintext( new IntegerEncoding( this->GetElementParams(), this->GetEncodingParams(), value ) ); } /** * MakeCoefPackedPlaintext constructs a CoefPackedEncoding in this context * @param value * @param isSigned * @return plaintext */ Plaintext MakeCoefPackedPlaintext(const vector<int64_t>& value) const { return Plaintext( new CoefPackedEncoding( this->GetElementParams(), this->GetEncodingParams(), value ) ); } /** * MakeCoefPackedPlaintext constructs a CoefPackedEncoding in this context 
* @param value * @param isSigned * @return plaintext */ Plaintext MakeCoefPackedPlaintext(const std::initializer_list<int64_t>& value) const { return Plaintext( new CoefPackedEncoding( this->GetElementParams(), this->GetEncodingParams(), value ) ); } /** * MakePackedPlaintext constructs a PackedEncoding in this context * @param value * @return plaintext */ Plaintext MakePackedPlaintext(const vector<uint64_t>& value) const { return Plaintext( new PackedEncoding( this->GetElementParams(), this->GetEncodingParams(), value ) ); } /** * MakePackedPlaintext constructs a PackedEncoding in this context * @param value * @return plaintext */ Plaintext MakePackedPlaintext(const std::initializer_list<uint64_t>& value) const { return Plaintext( new PackedEncoding( this->GetElementParams(), this->GetEncodingParams(), value ) ); } private: static Plaintext GetPlaintextForDecrypt(PlaintextEncodings pte, shared_ptr<typename Element::Params> evp, EncodingParams ep) { Plaintext pt; shared_ptr<typename NativePoly::Params> vp( new typename NativePoly::Params(evp->GetCyclotomicOrder(), ep->GetPlaintextModulus(), 1) ); switch(pte) { case Unknown: throw std::logic_error("Unknown plaintext encoding type in GetPlaintextForDecrypt"); break; case Scalar: pt.reset( new ScalarEncoding(vp,ep) ); break; case Integer: pt.reset( new IntegerEncoding(vp,ep) ); break; case CoefPacked: pt.reset( new CoefPackedEncoding(vp,ep) ); break; case Packed: pt.reset( new PackedEncoding(vp,ep) ); break; case String: pt.reset( new StringEncoding(vp,ep) ); break; } return pt; } public: /** * Decrypt a single ciphertext into the appropriate plaintext * * @param privateKey - decryption key * @param ciphertext - ciphertext to decrypt * @param plaintext - resulting plaintext object pointer is here * @return */ DecryptResult Decrypt( const LPPrivateKey<Element> privateKey, const Ciphertext<Element> ciphertext, Plaintext* plaintext) { if( privateKey == NULL || Mismatched(privateKey->GetCryptoContext()) ) throw std::logic_error("Information passed to Decrypt was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); // determine which type of plaintext that you need to decrypt into Plaintext decrypted = GetPlaintextForDecrypt(ciphertext->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); DecryptResult result = GetEncryptionAlgorithm()->Decrypt(privateKey, ciphertext, &decrypted->GetElement<NativePoly>()); if (result.isValid == false) return result; decrypted->Decode(); if( doTiming ) { timeSamples->push_back( TimingInfo(OpDecrypt, currentDateTime() - start) ); } *plaintext = decrypted; return result; } /** * Decrypt method for a matrix of ciphertexts * @param privateKey - for decryption * @param ciphertext - matrix of encrypted ciphertexts * @param plaintext - pointer to the destination martrix of plaintexts * @return size of plaintext */ DecryptResult DecryptMatrix( const LPPrivateKey<Element> privateKey, const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext, shared_ptr<Matrix<Plaintext>> *numerator, shared_ptr<Matrix<Plaintext>> *denominator) const { // edge case if ((ciphertext->GetCols()== 0) && (ciphertext->GetRows() == 0)) return DecryptResult(); if (privateKey == NULL || Mismatched(privateKey->GetCryptoContext())) throw std::runtime_error("Information passed to DecryptMatrix was not generated with this crypto context"); const Ciphertext<Element> ctN = (*ciphertext)(0, 0).GetNumerator(); // need to build matrices for the result Plaintext ptx = 
GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); auto zeroPackingAlloc = [=]() { return lbcrypto::make_unique<Plaintext>(ptx); }; *numerator = shared_ptr<Matrix<Plaintext>>( new Matrix<Plaintext>(zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols()) ); *denominator = shared_ptr<Matrix<Plaintext>>( new Matrix<Plaintext>(zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols()) ); double start = 0; if( doTiming ) start = currentDateTime(); for (size_t row = 0; row < ciphertext->GetRows(); row++) { for (size_t col = 0; col < ciphertext->GetCols(); col++) { if (Mismatched((*ciphertext)(row, col).GetCryptoContext())) throw std::runtime_error("A ciphertext passed to DecryptMatrix was not generated with this crypto context"); const Ciphertext<Element> ctN = (*ciphertext)(row, col).GetNumerator(); // determine which type of plaintext that you need to decrypt into Plaintext decryptedNumerator = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>()); if (resultN.isValid == false) return resultN; (**numerator)(row,col) = decryptedNumerator; (**numerator)(row,col)->Decode(); Plaintext decryptedDenominator = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); if( (*ciphertext)(row,col).GetIntegerFlag() == true ) { decryptedDenominator->GetElement<Poly>().SetValuesToZero(); decryptedDenominator->GetElement<Poly>().at(0) = 1; } else { const Ciphertext<Element> ctD = (*ciphertext)(row, col).GetDenominator(); DecryptResult resultD = GetEncryptionAlgorithm()->Decrypt(privateKey, ctD, &decryptedDenominator->GetElement<NativePoly>()); if (resultD.isValid == false) return resultD; (**denominator)(row,col) = decryptedDenominator; } (**denominator)(row, col)->Decode(); } } if( doTiming ) { timeSamples->push_back( TimingInfo(OpDecryptMatrixPlain, currentDateTime() - start) ); } return DecryptResult((**numerator)((*numerator)->GetRows()-1,(*numerator)->GetCols()-1)->GetLength()); } /** * Decrypt method for numerators in a matrix of ciphertexts (packed encoding) * @param privateKey - for decryption * @param ciphertext - matrix of encrypted ciphertexts * @param plaintext - pointer to the destination martrix of plaintexts * @return size of plaintext */ DecryptResult DecryptMatrixNumerator( const LPPrivateKey<Element> privateKey, const shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext, shared_ptr<Matrix<Plaintext>> *numerator) const { // edge case if ((ciphertext->GetCols() == 0) && (ciphertext->GetRows() == 0)) return DecryptResult(); if (privateKey == NULL || Mismatched(privateKey->GetCryptoContext())) throw std::runtime_error("Information passed to DecryptMatrix was not generated with this crypto context"); double start = 0; if (doTiming) start = currentDateTime(); //force all precomputations to take place in advance if( Mismatched((*ciphertext)(0, 0).GetCryptoContext()) ) throw std::runtime_error("A ciphertext passed to DecryptMatrix was not generated with this crypto context"); const Ciphertext<Element> ctN = (*ciphertext)(0, 0).GetNumerator(); // need to build a numerator matrix for the result Plaintext ptx = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); auto zeroPackingAlloc = [=]() { return lbcrypto::make_unique<Plaintext>(ptx); }; *numerator = 
shared_ptr<Matrix<Plaintext>>( new Matrix<Plaintext>(zeroPackingAlloc, ciphertext->GetRows(), ciphertext->GetCols()) ); Plaintext decryptedNumerator = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); DecryptResult resultN = GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>()); if (resultN.isValid == false) return resultN; (**numerator)(0, 0) = decryptedNumerator; (**numerator)(0, 0)->Decode(); for (size_t row = 0; row < ciphertext->GetRows(); row++) { #pragma omp parallel for for (size_t col = 0; col < ciphertext->GetCols(); col++) { if (row + col > 0) { if( Mismatched((*ciphertext)(row, col).GetCryptoContext()) ) throw std::runtime_error("A ciphertext passed to DecryptMatrix was not generated with this crypto context"); const Ciphertext<Element> ctN = (*ciphertext)(row, col).GetNumerator(); Plaintext decryptedNumerator = GetPlaintextForDecrypt(ctN->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); GetEncryptionAlgorithm()->Decrypt(privateKey, ctN, &decryptedNumerator->GetElement<NativePoly>()); (**numerator)(row, col) = decryptedNumerator; (**numerator)(row, col)->Decode(); } } } if (doTiming) { timeSamples->push_back(TimingInfo(OpDecryptMatrixPacked, currentDateTime() - start)); } return DecryptResult((**numerator)((*numerator)->GetRows() - 1, (*numerator)->GetCols() - 1)->GetLength()); } /** * read instream for a sequence of serialized ciphertext; deserialize it, decrypt it, and write it to outstream * @param privateKey - reference to the decryption key * @param instream - input stream with sequence of serialized ciphertexts * @param outstream - output stream for plaintext * @return total bytes processed */ size_t DecryptStream( const LPPrivateKey<Element> privateKey, std::istream& instream, std::ostream& outstream) { // NOTE timing this operation is not supported if( privateKey == NULL || Mismatched(privateKey->GetCryptoContext()) ) throw std::logic_error("Information passed to DecryptStream was not generated with this crypto context"); Serialized serObj; size_t tot = 0; bool firstTime = true; Plaintext pte[2]; bool whichArray = false; while( SerializableHelper::StreamToSerialization(instream, &serObj) ) { Ciphertext<Element> ct; if( (ct = deserializeCiphertext(serObj)) != NULL ) { if( ct->GetEncodingType() != String ) { throw std::logic_error("Library can only stream string encodings"); } pte[whichArray] = GetPlaintextForDecrypt(ct->GetEncodingType(), this->GetElementParams(), this->GetEncodingParams()); DecryptResult res = GetEncryptionAlgorithm()->Decrypt(privateKey, ct, &pte[whichArray]->GetElement<NativePoly>()); if( !res.isValid ) return tot; tot += res.messageLength; pte[whichArray]->Decode(); if( !firstTime ) { outstream << pte[!whichArray]->GetStringValue(); } firstTime = false; whichArray = !whichArray; } else return tot; } outstream << pte[!whichArray]->GetStringValue(); return tot; } /** * ReEncrypt - Proxy Re Encryption mechanism for PALISADE * @param evalKey - evaluation key from the PRE keygen method * @param ciphertext - vector of shared pointers to encrypted Ciphertext * @return vector of shared pointers to re-encrypted ciphertexts */ Ciphertext<Element> ReEncrypt( LPEvalKey<Element> evalKey, Ciphertext<Element> ciphertext) const { if( evalKey == NULL || Mismatched(evalKey->GetCryptoContext()) ) throw std::logic_error("Information passed to ReEncrypt was not generated with this crypto context"); if( ciphertext == NULL || 
Mismatched(ciphertext->GetCryptoContext()) ) throw std::logic_error("The ciphertext passed to ReEncrypt was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); Ciphertext<Element> newCiphertext = GetEncryptionAlgorithm()->ReEncrypt(evalKey, ciphertext); if( doTiming ) { timeSamples->push_back( TimingInfo(OpReEncrypt, currentDateTime() - start) ); } return newCiphertext; } /** * read instream for a serialized ciphertext. deserialize, re-encrypt, serialize, and write to outstream * @param evalKey - reference to the re-encryption key * @param instream - input stream with sequence of serialized ciphertext * @param outstream - output stream with sequence of serialized re-encrypted ciphertext */ void ReEncryptStream( const LPEvalKey<Element> evalKey, std::istream& instream, std::ostream& outstream) { // NOTE timing this operation is not supported if( evalKey == NULL || Mismatched(evalKey->GetCryptoContext()) ) throw std::logic_error("Information passed to ReEncryptStream was not generated with this crypto context"); Serialized serObj; while( SerializableHelper::StreamToSerialization(instream, &serObj) ) { Ciphertext<Element> ct; ct = deserializeCiphertext(serObj); if( ct ) { Ciphertext<Element> reCt = ReEncrypt(evalKey, ct); Serialized serReObj; if( reCt->Serialize(&serReObj) ) { SerializableHelper::SerializationToStream(serReObj, outstream); } else { return; } } else { return; } } } /** * EvalAdd - PALISADE EvalAdd method for a pair of ciphertexts * @param ct1 * @param ct2 * @return new ciphertext for ct1 + ct2 */ Ciphertext<Element> EvalAdd(const Ciphertext<Element> ct1, const Ciphertext<Element> ct2) const { TypeCheck(ct1, ct2); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalAdd(ct1, ct2); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAdd, currentDateTime() - start) ); } return rv; } /** * EvalAddMatrix - PALISADE EvalAdd method for a pair of matrices of ciphertexts * @param ct1 * @param ct2 * @return new matrix for ct1 + ct2 */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalAddMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const { TypeCheck((*ct1)(0,0), (*ct2)(0,0)); // TODO only checking one; when Matrix is refactored, this should be revisited double start = 0; if( doTiming ) start = currentDateTime(); Matrix<RationalCiphertext<Element>> rv = *ct1 + *ct2; if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAddMatrix, currentDateTime() - start) ); } shared_ptr<Matrix<RationalCiphertext<Element>>> a(new Matrix<RationalCiphertext<Element>>(rv)); return a; } /** * EvalSub - PALISADE EvalSub method for a pair of ciphertexts * @param ct1 * @param ct2 * @return new ciphertext for ct1 - ct2 */ Ciphertext<Element> EvalSub(const Ciphertext<Element> ct1, const Ciphertext<Element> ct2) const { TypeCheck(ct1, ct2); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalSub(ct1, ct2); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalSub, currentDateTime() - start) ); } return rv; } /** * EvalSubMatrix - PALISADE EvalSub method for a pair of matrices of ciphertexts * @param ct1 * @param ct2 * @return new matrix for ct1 + ct2 */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalSubMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const { TypeCheck((*ct1)(0,0), (*ct2)(0,0)); // 
TODO only checking one; when Matrix is refactored, this should be revisited double start = 0; if( doTiming ) start = currentDateTime(); Matrix<RationalCiphertext<Element>> rv = *ct1 - *ct2; if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalSubMatrix, currentDateTime() - start) ); } shared_ptr<Matrix<RationalCiphertext<Element>>> a(new Matrix<RationalCiphertext<Element>>(rv)); return a; } /** * EvalAdd - PALISADE EvalAdd method for a ciphertext and plaintext * @param ciphertext * @param plaintext * @return new ciphertext for ciphertext + plaintext */ Ciphertext<Element> EvalAdd(const Ciphertext<Element> ciphertext, const Plaintext plaintext) const { TypeCheck(ciphertext, plaintext); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalAdd(ciphertext, plaintext); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAddPlain, currentDateTime() - start) ); } return rv; } /** * EvalSubPlain - PALISADE EvalSub method for a ciphertext and plaintext * @param ciphertext * @param plaintext * @return new ciphertext for ciphertext - plaintext */ Ciphertext<Element> EvalSub(const Ciphertext<Element> ciphertext, const Plaintext plaintext) const { TypeCheck(ciphertext, plaintext); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalSub(ciphertext, plaintext); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalSubPlain, currentDateTime() - start) ); } return rv; } /** * EvalMult - PALISADE EvalMult method for a pair of ciphertexts - with key switching * @param ct1 * @param ct2 * @return new ciphertext for ct1 * ct2 */ Ciphertext<Element> EvalMult(const Ciphertext<Element> ct1, const Ciphertext<Element> ct2) const { TypeCheck(ct1, ct2); auto ek = GetEvalMultKeyVector(ct1->GetKeyTag()); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, ct2, ek[0]); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalMult, currentDateTime() - start) ); } return rv; } /** * EvalMult - PALISADE EvalMult method for a pair of ciphertexts - no key switching (relinearization) * @param ct1 * @param ct2 * @return new ciphertext for ct1 * ct2 */ Ciphertext<Element> EvalMultNoRelin(const Ciphertext<Element> ct1, const Ciphertext<Element> ct2) const { TypeCheck(ct1, ct2); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, ct2); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalMult, currentDateTime() - start) ); } return rv; } /** * EvalMultMany - PALISADE function for evaluating multiplication on ciphertext followed by relinearization operation (at the end). * It computes the multiplication in a binary tree manner. Also, it reduces the number of * elements in the ciphertext to two after each multiplication. * Currently it assumes that the consecutive two input arguments have * total depth smaller than the supported depth. Otherwise, it throws an error. * * @param cipherTextList is the ciphertext list. * * @return new ciphertext. */ Ciphertext<Element> EvalMultMany(const vector<Ciphertext<Element>>& ct) const{ const auto ek = GetEvalMultKeyVector(ct[0]->GetKeyTag()); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalMultMany(ct, ek); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalMult, currentDateTime() - start) ); } return rv; } /** * Function for evaluating multiplication on ciphertext followed by relinearization operation. 
* Currently it assumes that the input arguments have total depth smaller than the supported depth. Otherwise, it throws an error. * * @param ct1 first input ciphertext. * @param ct2 second input ciphertext. * * @return new ciphertext */ Ciphertext<Element> EvalMultAndRelinearize(const Ciphertext<Element> ct1, const Ciphertext<Element> ct2) const{ const auto ek = GetEvalMultKeyVector(ct1->GetKeyTag()); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalMultAndRelinearize(ct1, ct2, ek); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalMult, currentDateTime() - start) ); } return rv; } /** * EvalMult - PALISADE EvalMult method for plaintext * ciphertext * @param pt2 * @param ct1 * @return new ciphertext for ct1 * pt2 */ Ciphertext<Element> EvalMult(const Plaintext pt2, const Ciphertext<Element> ct1) const { return EvalMult(ct1, pt2); } /** * EvalMult - PALISADE EvalMult method for plaintext * ciphertext * @param ct1 * @param pt2 * @return new ciphertext for ct1 * pt2 */ Ciphertext<Element> EvalMult(const Ciphertext<Element> ct1, const Plaintext pt2) const { TypeCheck(ct1, pt2); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalMult(ct1, pt2); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalMult, currentDateTime() - start) ); } return rv; } /** * EvalMultMatrix - PALISADE EvalMult method for two matrices of ciphertext * @param ct1 * @param ct2 * @return new matrix for ct1 * ct2 */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalMultMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct1, const shared_ptr<Matrix<RationalCiphertext<Element>>> ct2) const { TypeCheck((*ct1)(0,0), (*ct2)(0,0)); // TODO only checking one; when Matrix is refactored, this should be revisited double start = 0; if( doTiming ) start = currentDateTime(); Matrix<RationalCiphertext<Element>> rv = *ct1 * *ct2; if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalMultMatrix, currentDateTime() - start) ); } shared_ptr<Matrix<RationalCiphertext<Element>>> a(new Matrix<RationalCiphertext<Element>>(rv)); return a; } /** * EvalSub - PALISADE Negate method for a ciphertext * @param ct * @return new ciphertext -ct */ Ciphertext<Element> EvalNegate(const Ciphertext<Element> ct) const { if (ct == NULL || Mismatched(ct->GetCryptoContext()) ) throw std::logic_error("Information passed to EvalNegate was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalNegate(ct); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalNeg, currentDateTime() - start) ); } return rv; } /** * EvalSub - PALISADE Negate method for a ciphertext * @param ct * @return new ciphertext -ct */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalNegateMatrix(const shared_ptr<Matrix<RationalCiphertext<Element>>> ct) const { if (ct == NULL || Mismatched((*ct)(0,0).GetCryptoContext()) ) throw std::logic_error("Information passed to EvalNegateMatrix was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); shared_ptr<Matrix<RationalCiphertext<Element>>> m( new Matrix<RationalCiphertext<Element>>(ct->GetAllocator(), ct->GetRows(), ct->GetCols())); for( size_t r = 0; r < m->GetRows(); r++ ) for( size_t c = 0; c < m->GetCols(); c++ ) (*m)(r,c) = -((*ct)(r,c)); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalNegMatrix, currentDateTime() - start) ); } return m; } /** * Generate automophism 
keys for a given private key * * @param publicKey original public key. * @param origPrivateKey original private key. * @param indexList list of automorphism indices to be computed * @return returns the evaluation keys; index 0 of the vector corresponds to plaintext index 2, index 1 to plaintex index 3, etc. */ shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPublicKey<Element> publicKey, const LPPrivateKey<Element> origPrivateKey, const std::vector<usint> &indexList) const { if( publicKey == NULL || origPrivateKey == NULL ) PALISADE_THROW( type_error, "Null Keys"); if( publicKey->GetCryptoContext().get() != this ) PALISADE_THROW( type_error, "Key was not created in this CryptoContextImpl"); if( publicKey->GetCryptoContext() != origPrivateKey->GetCryptoContext() ) PALISADE_THROW( type_error, "Keys were not created in the same CryptoContextImpl"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalAutomorphismKeyGen(publicKey, origPrivateKey, indexList); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAutomorphismKeyGen, currentDateTime() - start) ); } return rv; } /** * Function for evaluating automorphism of ciphertext at index i * * @param ciphertext the input ciphertext. * @param i automorphism index * @param &evalKeys - reference to the vector of evaluation keys generated by EvalAutomorphismKeyGen. * @return resulting ciphertext */ Ciphertext<Element> EvalAutomorphism(const Ciphertext<Element> ciphertext, usint i, const std::map<usint, LPEvalKey<Element>> &evalKeys) const { auto mf = evalKeys.begin(); if( mf == evalKeys.end() ) PALISADE_THROW( type_error, "Empty key map"); auto tk = mf->second; if( ciphertext == NULL || tk == NULL ) PALISADE_THROW( type_error, "Null inputs"); if( ciphertext->GetCryptoContext().get() != this ) PALISADE_THROW( type_error, "Ciphertext was not created in this CryptoContextImpl"); if( ciphertext->GetCryptoContext() != tk->GetCryptoContext() ) PALISADE_THROW( type_error, "Items were not created in the same CryptoContextImpl"); if( ciphertext->GetKeyTag() != tk->GetKeyTag() ) PALISADE_THROW( type_error, "Items were not encrypted with same keys" ); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalAutomorphism(ciphertext, i, evalKeys); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAutomorphismI, currentDateTime() - start) ); } return rv; } /** * Generate automophism keys for a given private key; Uses the private key for encryption * * @param privateKey private key. * @param indexList list of automorphism indices to be computed * @return returns the evaluation keys */ shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPrivateKey<Element> privateKey, const std::vector<usint> &indexList) const { if( privateKey == NULL ) PALISADE_THROW( type_error, "Null input"); if( privateKey->GetCryptoContext().get() != this ) PALISADE_THROW( type_error, "Key was not created in this CryptoContextImpl"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalAutomorphismKeyGen(privateKey, indexList); if( doTiming ) { timeSamples->push_back( TimingInfo(OpEvalAutomorphismK, currentDateTime() - start) ); } return rv; } /** * EvalSumKeyGen Generates the key map to be used by evalsum * * @param privateKey private key. * @param publicKey public key (used in NTRU schemes). 
*/ void EvalSumKeyGen( const LPPrivateKey<Element> privateKey, const LPPublicKey<Element> publicKey = nullptr); /** * GetEvalSumKey returns the map * * @return the EvalSum key map */ static const std::map<usint, LPEvalKey<Element>>& GetEvalSumKeyMap(const string& id); static const std::map<string,shared_ptr<std::map<usint, LPEvalKey<Element>>>>& GetAllEvalSumKeys(); /** * Function for evaluating a sum of all components * * @param ciphertext the input ciphertext. * @param batchSize size of the batch * @return resulting ciphertext */ Ciphertext<Element> EvalSum(const Ciphertext<Element> ciphertext, usint batchSize) const; /** * Evaluates inner product in batched encoding * * @param ciphertext1 first vector. * @param ciphertext2 second vector. * @param batchSize size of the batch to be summed up * @return resulting ciphertext */ Ciphertext<Element> EvalInnerProduct(const Ciphertext<Element> ciphertext1, const Ciphertext<Element> ciphertext2, usint batchSize) const; /** * Evaluates inner product in batched encoding * * @param ciphertext1 first vector. * @param ciphertext2 second vector. * @param batchSize size of the batch to be summed up * @return resulting ciphertext */ Ciphertext<Element> EvalInnerProduct(const Ciphertext<Element> ciphertext1, const Plaintext ciphertext2, usint batchSize) const; /** * EvalCrossCorrelation - Computes the sliding sum of inner products (known as * as cross-correlation, sliding inner product, or sliding dot product in * image processing * @param x - first vector of row vectors * @param y - second vector of row vectors * @param batchSize - batch size for packed encoding * @param indexStart - starting index in the vectors of row vectors * @param length - length of the slice in the vectors of row vectors; default is 0 meaning to use the full length of the vector * @return sum(x_i*y_i), i.e., a sum of inner products */ Ciphertext<Element> EvalCrossCorrelation(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize, usint indexStart = 0, usint length = 0) const; /** * EvalLinRegressBatched- Computes the parameter vector for linear regression using the least squares method * Supported only in batched mode; currently works only for two regressors * @param x - matrix of regressors * @param y - vector of dependent variables * @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method) */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegressBatched(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize) const; /** * EvalLinRegression - Computes the parameter vector for linear regression using the least squares method * @param x - matrix of regressors * @param y - vector of dependent variables * @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method) */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegression(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y) const { TypeCheck((*x)(0,0), (*y)(0,0)); // TODO only checking one; when Matrix is refactored, this should be revisited double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->EvalLinRegression(x, y); if( doTiming ) { timeSamples->push_back( TimingInfo(OpLinRegression, currentDateTime() - start) ); } return rv; } /** * KeySwitch - PALISADE KeySwitch method * @param keySwitchHint - reference to 
KeySwitchHint * @param ciphertext - vector of ciphertext * @return new CiphertextImpl after applying key switch */ Ciphertext<Element> KeySwitch( const LPEvalKey<Element> keySwitchHint, const Ciphertext<Element> ciphertext) const { if( keySwitchHint == NULL || Mismatched(keySwitchHint->GetCryptoContext()) ) throw std::logic_error("Key passed to KeySwitch was not generated with this crypto context"); if( ciphertext == NULL || Mismatched(ciphertext->GetCryptoContext()) ) throw std::logic_error("Ciphertext passed to KeySwitch was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->KeySwitch(keySwitchHint, ciphertext); if( doTiming ) { timeSamples->push_back( TimingInfo(OpKeySwitch, currentDateTime() - start) ); } return rv; } /** * ModReduce - PALISADE ModReduce method * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ Ciphertext<Element> ModReduce(Ciphertext<Element> ciphertext) const { if( ciphertext == NULL || Mismatched(ciphertext->GetCryptoContext()) ) throw std::logic_error("Information passed to ModReduce was not generated with this crypto context"); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->ModReduce(ciphertext); if( doTiming ) { timeSamples->push_back( TimingInfo(OpModReduce, currentDateTime() - start) ); } return rv; } /** * ModReduce - PALISADE ModReduce method * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ RationalCiphertext<Element> ModReduceRational(RationalCiphertext<Element> ciphertext) const { double start = 0; if( doTiming ) start = currentDateTime(); Ciphertext<Element> n = GetEncryptionAlgorithm()->ModReduce(ciphertext.GetNumerator()); Ciphertext<Element> d = GetEncryptionAlgorithm()->ModReduce(ciphertext.GetDenominator()); if( doTiming ) { timeSamples->push_back( TimingInfo(OpModReduce, currentDateTime() - start) ); } return RationalCiphertext<Element>(n,d); } /** * ModReduce - PALISADE ModReduce method * @param ciphertext - vector of ciphertext * @return vector of mod reduced ciphertext */ shared_ptr<Matrix<RationalCiphertext<Element>>> ModReduceMatrix(shared_ptr<Matrix<RationalCiphertext<Element>>> ciphertext) const { // needs context check double start = 0; if( doTiming ) start = currentDateTime(); shared_ptr<Matrix<RationalCiphertext<Element>>> m( new Matrix<RationalCiphertext<Element>>(ciphertext->GetAllocator(), ciphertext->GetRows(), ciphertext->GetCols())); for( size_t r = 0; r < m->GetRows(); r++ ) for( size_t c = 0; c < m->GetCols(); c++ ) (*m)(r,c) = ModReduceRational((*ciphertext)(r,c)); if( doTiming ) { timeSamples->push_back( TimingInfo(OpModReduceMatrix, currentDateTime() - start) ); } return m; } /** * LevelReduce - PALISADE LevelReduce method * @param cipherText1 * @param linearKeySwitchHint * @return vector of level reduced ciphertext */ Ciphertext<Element> LevelReduce(const Ciphertext<Element> cipherText1, const LPEvalKeyNTRU<Element> linearKeySwitchHint) const { if( cipherText1 == NULL || linearKeySwitchHint == NULL || Mismatched(cipherText1->GetCryptoContext()) || Mismatched(linearKeySwitchHint->GetCryptoContext()) ) { throw std::logic_error("Information passed to LevelReduce was not generated with this crypto context"); } double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->LevelReduce(cipherText1, linearKeySwitchHint); if( doTiming ) { timeSamples->push_back( TimingInfo(OpLevelReduce, 
currentDateTime() - start) ); } return rv; } /** * RingReduce - PALISADE RingReduce method * @param ciphertext - vector of ciphertext * @param keySwitchHint - the keySwitchHint from original private key to sparse private key * @return vector of ring-reduced ciphertexts */ Ciphertext<Element> RingReduce( Ciphertext<Element> ciphertext, const LPEvalKey<Element> keySwitchHint) const { if( keySwitchHint == NULL || Mismatched(keySwitchHint->GetCryptoContext()) ) throw std::logic_error("Key passed to RingReduce was not generated with this crypto context"); if( ciphertext == NULL || Mismatched(ciphertext->GetCryptoContext()) ) throw std::logic_error("Ciphertext passed to RingReduce was not generated with this crypto context"); Ciphertext<Element> newCiphertext; double start = 0; if( doTiming ) start = currentDateTime(); newCiphertext = GetEncryptionAlgorithm()->RingReduce(ciphertext, keySwitchHint); if( doTiming ) { timeSamples->push_back( TimingInfo(OpRingReduce, currentDateTime() - start) ); } return newCiphertext; } /** * ComposedEvalMult - PALISADE composed evalmult * @param ciphertext1 - vector for first cipher text * @param ciphertext2 - vector for second cipher text * @param quadKeySwitchHint - is the quadratic key switch hint from original private key to the quadratic key * return vector of resulting ciphertext */ Ciphertext<Element> ComposedEvalMult( const Ciphertext<Element> ciphertext1, const Ciphertext<Element> ciphertext2) const { if( ciphertext1 == NULL || ciphertext2 == NULL || ciphertext1->GetKeyTag() != ciphertext2->GetKeyTag() || Mismatched(ciphertext1->GetCryptoContext()) ) throw std::logic_error("Ciphertexts passed to ComposedEvalMult were not generated with this crypto context"); auto ek = GetEvalMultKeyVector(ciphertext1->GetKeyTag()); double start = 0; if( doTiming ) start = currentDateTime(); auto rv = GetEncryptionAlgorithm()->ComposedEvalMult(ciphertext1, ciphertext2, ek[0]); if( doTiming ) { timeSamples->push_back( TimingInfo(OpComposedEvalMult, currentDateTime() - start) ); } return rv; } /** * Deserialize into a Public Key * @param serObj * @return deserialized object */ static LPPublicKey<Element> deserializePublicKey(const Serialized& serObj); /** * Deserialize into a Private Key * @param serObj * @return deserialized object */ static LPPrivateKey<Element> deserializeSecretKey(const Serialized& serObj); /** * Deserialize into a Ciphertext * @param serObj * @return deserialized object */ static Ciphertext<Element> deserializeCiphertext(const Serialized& serObj); /** * Deserialize into an Eval Key in a given context * @param serObj * @return deserialized object */ static LPEvalKey<Element> deserializeEvalKey(const Serialized& serObj); /** * Deserialize into an Eval Key * @param serObj * @return deserialized object */ static LPEvalKey<Element> deserializeEvalKeyInContext(const Serialized& serObj, CryptoContext<Element> cc); }; /** * @brief CryptoObject * * A class to aid in referring to the crypto context that an object belongs to */ template<typename Element> class CryptoObject { protected: CryptoContext<Element> context; /*!< crypto context this object belongs to */ string keyTag; /*!< tag used to find the evaluation key needed for SHE/FHE operations */ public: CryptoObject(CryptoContext<Element> cc = 0, const string& tag = "") : context(cc), keyTag(tag) {} CryptoObject(const CryptoObject& rhs) { context = rhs.context; keyTag = rhs.keyTag; } CryptoObject(const CryptoObject&& rhs) { context = std::move(rhs.context); keyTag = std::move(rhs.keyTag); } virtual 
~CryptoObject() {} const CryptoObject& operator=(const CryptoObject& rhs) { this->context = rhs.context; this->keyTag = rhs.keyTag; return *this; } const CryptoObject& operator=(const CryptoObject&& rhs) { this->context = std::move(rhs.context); this->keyTag = std::move(rhs.keyTag); return *this; } bool operator==(const CryptoObject& rhs) const { return context.get() == rhs.context.get() && keyTag == rhs.keyTag; } CryptoContext<Element> GetCryptoContext() const { return context; } const shared_ptr<LPCryptoParameters<Element>> GetCryptoParameters() const { return context->GetCryptoParameters(); } const EncodingParams GetEncodingParameters() const { return context->GetCryptoParameters()->GetEncodingParams(); } const string GetKeyTag() const { return keyTag; } void SetKeyTag(const string& tag) { keyTag = tag; } /** * SerializeCryptoObject serializes this header into a Serialized * @param serObj is used to store the serialized result. * @return true if successfully serialized */ bool SerializeCryptoObject(Serialized* serObj, bool includeContext = true) const; /** * DeserializeCryptoObject Populates this header from the deserialization of the Serialized * @param serObj contains the serialized object * @return true on success */ bool DeserializeCryptoObject(const Serialized& serObj, bool includesContext = true); }; /** * @brief CryptoContextFactory * * A class that contains static methods to generate new crypto contexts from user parameters * */ template<typename Element> class CryptoContextFactory { static vector<CryptoContext<Element>> AllContexts; public: static void ReleaseAllContexts(); static int GetContextCount(); static CryptoContext<Element> GetSingleContext(); static CryptoContext<Element> GetContext( shared_ptr<LPCryptoParameters<Element>> params, shared_ptr<LPPublicKeyEncryptionScheme<Element>> scheme); static CryptoContext<Element> GetContextForPointer( CryptoContextImpl<Element>* cc); static const vector<CryptoContext<Element>>& GetAllContexts() { return AllContexts; } /** * construct a PALISADE CryptoContextImpl for the LTV Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param depth * @return new context */ static CryptoContext<Element> genCryptoContextLTV(shared_ptr<typename Element::Params> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContextImpl for the LTV Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param depth * @return new context */ static CryptoContext<Element> genCryptoContextLTV(shared_ptr<typename Element::Params> params, EncodingParams encodingParams, usint relinWindow, float stDev, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContextImpl for the LTV Scheme using the scheme's ParamsGen methods * @param plaintextModulus * @param securityLevel * @param numAdds * @param numMults * @param numKeyswitches * @return new context */ static CryptoContext<Element> genCryptoContextLTV( const PlaintextModulus plaintextModulus, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches); /** * construct a PALISADE CryptoContextImpl for the LTV Scheme using the scheme's ParamsGen methods * @param encodingParams * @param securityLevel * @param numAdds * @param 
numMults * @param numKeyswitches * @return new context */ static CryptoContext<Element> genCryptoContextLTV( EncodingParams encodingParams, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param delta * @param mode * @param bigmodulus * @param bigrootofunity * @param depth * @param assuranceMeasure * @param securityLevel * @param bigmodulusarb * @param bigrootofunityarb * @return new context */ static CryptoContext<Element> genCryptoContextBFV(shared_ptr<typename Element::Params> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, const std::string& delta, MODE mode = RLWE, const std::string& bigmodulus = "0", const std::string& bigrootofunity = "0", int depth = 0, int assuranceMeasure = 0, float securityLevel = 0, const std::string& bigmodulusarb = "0", const std::string& bigrootofunityarb = "0", int maxDepth = 2); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param delta * @param mode * @param bigmodulus * @param bigrootofunity * @param depth * @param assuranceMeasure * @param securityLevel * @param bigmodulusarb * @param bigrootofunityarb * @return new context */ static CryptoContext<Element> genCryptoContextBFV(shared_ptr<typename Element::Params> params, EncodingParams encodingParams, usint relinWindow, float stDev, const std::string& delta, MODE mode = RLWE, const std::string& bigmodulus = "0", const std::string& bigrootofunity = "0", int depth = 0, int assuranceMeasure = 0, float securityLevel = 0, const std::string& bigmodulusarb = "0", const std::string& bigrootofunityarb = "0", int maxDepth = 2); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the scheme's ParamsGen methods * @param plaintextModulus * @param securityLevel * @param numAdds * @param numMults * @param numKeyswitches * @return new context */ static CryptoContext<Element> genCryptoContextBFV( const PlaintextModulus plaintextModulus, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2); /** * construct a PALISADE CryptoContextImpl for the BFV Scheme using the scheme's ParamsGen methods * @param encodingParams * @param securityLevel * @param numAdds * @param numMults * @param numKeyswitches * @return new context */ static CryptoContext<Element> genCryptoContextBFV( EncodingParams encodingParams, float securityLevel, usint relinWindow, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the scheme's ParamsGen methods * @param plaintextModulus * @param securityLevel * @param distribution parameter * @param numAdds * @param numMults * @param numKeyswitches * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( const PlaintextModulus plaintextModulus, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2); /** * construct a PALISADE CryptoContextImpl for the BFVrns Scheme using the scheme's ParamsGen methods * 
@param encodingParams * @param securityLevel * @param distribution parameter * @param numAdds * @param numMults * @param numKeyswitches * @return new context */ static CryptoContext<Element> genCryptoContextBFVrns( EncodingParams encodingParams, float securityLevel, float dist, unsigned int numAdds, unsigned int numMults, unsigned int numKeyswitches, MODE mode = OPTIMIZED, int maxDepth = 2); /** * construct a PALISADE CryptoContextImpl for the BGV Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @return new context */ static CryptoContext<Element> genCryptoContextBGV(shared_ptr<typename Element::Params> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContextImpl for the BGV Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param mode * @return new context */ static CryptoContext<Element> genCryptoContextBGV(shared_ptr<typename Element::Params> params, EncodingParams encodingParams, usint relinWindow, float stDev, MODE mode = RLWE, int depth = 1); /** * construct a PALISADE CryptoContextImpl for the StehleSteinfeld Scheme * @param plaintextmodulus * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param stDevStSt * @return new context */ static CryptoContext<Element> genCryptoContextStehleSteinfeld(shared_ptr<typename Element::Params> params, const PlaintextModulus plaintextmodulus, usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContextImpl for the StehleSteinfeld Scheme * @param encodingParams * @param ringdim * @param modulus * @param rootOfUnity * @param relinWindow * @param stDev * @param stDevStSt * @return new context */ static CryptoContext<Element> genCryptoContextStehleSteinfeld(shared_ptr<typename Element::Params> params, EncodingParams encodingParams, usint relinWindow, float stDev, float stDevStSt, int depth = 1, int assuranceMeasure = 9, float securityLevel = 1.006); /** * construct a PALISADE CryptoContextImpl for the Null Scheme * @param plaintext modulus * @return */ static CryptoContext<Element> genCryptoContextNull(unsigned int m, const PlaintextModulus ptModulus); /** * construct a PALISADE CryptoContextImpl for the Null Scheme * @param encodingParams * @return */ static CryptoContext<Element> genCryptoContextNull(unsigned int m, EncodingParams encodingParams); /** * Create a PALISADE CryptoContextImpl from a serialization * @param serObj * @return new context */ static CryptoContext<Element> DeserializeAndCreateContext(const Serialized& serObj); }; } #endif /* SRC_DEMO_PRE_CRYPTOCONTEXT_H_ */
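The CryptoContextImpl members declared above (MakeIntegerPlaintext, EvalAdd, EvalMult, Decrypt, and the CryptoContextFactory generators) combine into the usual encrypt, evaluate, decrypt round trip. The following is only a minimal sketch: the lbcrypto namespace, the DCRTPoly element type, the header name, and the Enable(), KeyGen(), EvalMultKeyGen() and context-level Encrypt() members are assumptions taken from the wider PALISADE API and from parts of this header not shown here; parameter values are illustrative.

// Minimal usage sketch for the CryptoContextImpl API declared above.
// Assumptions not shown in this excerpt: the lbcrypto namespace, the DCRTPoly
// element type, the header name, and the Enable()/KeyGen()/EvalMultKeyGen()/
// Encrypt() members, all taken from the wider PALISADE API.
#include "cryptocontext.h"   // header name assumed from the include guard above
using namespace lbcrypto;

void bfvrns_round_trip() {
    CryptoContext<DCRTPoly> cc =
        CryptoContextFactory<DCRTPoly>::genCryptoContextBFVrns(
            /*plaintextModulus=*/65537, /*securityLevel=*/1.006,
            /*dist=*/3.2, /*numAdds=*/0, /*numMults=*/2, /*numKeyswitches=*/0);

    cc->Enable(ENCRYPTION);            // assumed feature flags
    cc->Enable(SHE);

    LPKeyPair<DCRTPoly> kp = cc->KeyGen();
    cc->EvalMultKeyGen(kp.secretKey);  // needed before EvalMult with relinearization

    Plaintext p1 = cc->MakeIntegerPlaintext(7);
    Plaintext p2 = cc->MakeIntegerPlaintext(3);
    auto c1 = cc->Encrypt(kp.publicKey, p1);
    auto c2 = cc->Encrypt(kp.publicKey, p2);

    auto cSum  = cc->EvalAdd(c1, c2);   // member declared above
    auto cProd = cc->EvalMult(c1, c2);  // uses the EvalMult key generated above

    Plaintext sum, prod;
    cc->Decrypt(kp.secretKey, cSum,  &sum);   // Decrypt(privateKey, ct, Plaintext*) as declared above
    cc->Decrypt(kp.secretKey, cProd, &prod);
}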
api_calls_places.c
// RUN: %libomp-compile && env OMP_PLACES=cores %libomp-run | FileCheck %s // REQUIRES: ompt, linux #include "callback.h" #include <omp.h> #define __USE_GNU #include <sched.h> #undef __USE_GNU void print_list(char *function_name, int size, int list[]) { printf("%" PRIu64 ": %s(0)=(%d", ompt_get_thread_data()->value, function_name, list[0]); int i; for (i = 1; i < size; i++) { printf(",%d", list[i]); } printf(")\n"); } int main() { #pragma omp parallel num_threads(1) { printf("%" PRIu64 ": omp_get_num_places()=%d\n", ompt_get_thread_data()->value, omp_get_num_places()); printf("%" PRIu64 ": ompt_get_num_places()=%d\n", ompt_get_thread_data()->value, ompt_get_num_places()); int omp_ids_size = omp_get_place_num_procs(0); int omp_ids[omp_ids_size]; omp_get_place_proc_ids(0, omp_ids); print_list("omp_get_place_proc_ids", omp_ids_size, omp_ids); int ompt_ids_size = ompt_get_place_proc_ids(0, 0, NULL); int ompt_ids[ompt_ids_size]; ompt_get_place_proc_ids(0, ompt_ids_size, ompt_ids); print_list("ompt_get_place_proc_ids", ompt_ids_size, ompt_ids); printf("%" PRIu64 ": omp_get_place_num()=%d\n", ompt_get_thread_data()->value, omp_get_place_num()); printf("%" PRIu64 ": ompt_get_place_num()=%d\n", ompt_get_thread_data()->value, ompt_get_place_num()); int omp_nums_size = omp_get_partition_num_places(); int omp_nums[omp_nums_size]; omp_get_partition_place_nums(omp_nums); print_list("omp_get_partition_place_nums", omp_nums_size, omp_nums); int ompt_nums_size = ompt_get_partition_place_nums(0, NULL); int ompt_nums[ompt_nums_size]; ompt_get_partition_place_nums(ompt_nums_size, ompt_nums); print_list("ompt_get_partition_place_nums", ompt_nums_size, ompt_nums); printf("%" PRIu64 ": sched_getcpu()=%d\n", ompt_get_thread_data()->value, sched_getcpu()); printf("%" PRIu64 ": ompt_get_proc_id()=%d\n", ompt_get_thread_data()->value, ompt_get_proc_id()); printf("%" PRIu64 ": omp_get_num_procs()=%d\n", ompt_get_thread_data()->value, omp_get_num_procs()); printf("%" PRIu64 ": ompt_get_num_procs()=%d\n", ompt_get_thread_data()->value, ompt_get_num_procs()); } // Check if libomp supports the callbacks for this test. // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: omp_get_num_places // CHECK-SAME: ()=[[NUM_PLACES:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_get_num_places()=[[NUM_PLACES]] // CHECK: {{^}}[[MASTER_ID]]: omp_get_place_proc_ids // CHECK-SAME: (0)=([[PROC_IDS:[0-9\,]+]]) // CHECK: {{^}}[[MASTER_ID]]: ompt_get_place_proc_ids(0)=([[PROC_IDS]]) // CHECK: {{^}}[[MASTER_ID]]: omp_get_place_num()=[[PLACE_NUM:[-]?[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_get_place_num()=[[PLACE_NUM]] // CHECK: {{^}}[[MASTER_ID]]: omp_get_partition_place_nums // CHECK-SAME: (0)=([[PARTITION_PLACE_NUMS:[0-9\,]+]]) // CHECK: {{^}}[[MASTER_ID]]: ompt_get_partition_place_nums // CHECK-SAME: (0)=([[PARTITION_PLACE_NUMS]]) // CHECK: {{^}}[[MASTER_ID]]: sched_getcpu()=[[CPU_ID:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_get_proc_id()=[[CPU_ID]] // CHECK: {{^}}[[MASTER_ID]]: omp_get_num_procs()=[[NUM_PROCS:[-]?[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_get_num_procs()=[[NUM_PROCS]] return 0; }
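The test above pairs each ompt_* entry point with its omp_* counterpart. For reference, the portable OpenMP 4.5 affinity queries it exercises can be used on their own, without OMPT callbacks; the sketch below is illustrative only and assumes an OpenMP 4.5 runtime (run with OMP_PLACES=cores, for example, to obtain a non-trivial place list).

// OMPT-free reference for the OpenMP 4.5 place/affinity queries used above.
#include <cstdio>
#include <vector>
#include <omp.h>

int main() {
    std::printf("num_places=%d\n", omp_get_num_places());

    #pragma omp parallel num_threads(2)
    {
        int place  = omp_get_place_num();                              // place of this thread (-1 if none)
        int nprocs = (place >= 0) ? omp_get_place_num_procs(place) : 0;
        std::vector<int> ids(nprocs > 0 ? nprocs : 1, -1);
        if (nprocs > 0)
            omp_get_place_proc_ids(place, ids.data());                 // processor ids in that place

        #pragma omp critical
        std::printf("thread %d: place %d, first proc id %d, partition places %d\n",
                    omp_get_thread_num(), place, ids[0],
                    omp_get_partition_num_places());
    }
    return 0;
}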
GB_unaryop__lnot_uint64_bool.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_uint64_bool // op(A') function: GB_tran__lnot_uint64_bool // C type: uint64_t // A type: bool // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ bool #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_uint64_bool ( uint64_t *restrict Cx, const bool *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_uint64_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
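For readers tracing the macro layers in the generated kernel above, the combination of GB_GETA, GB_CASTING and GB_OP collapses, for this particular type pairing (bool input, uint64_t output, LNOT operator), into a plain elementwise apply loop. The dependency-free sketch below shows that expansion; it is illustrative and not part of the generated source.

// Expansion of the macro chain above for bool -> uint64_t with LNOT.
#include <cstdint>

void lnot_uint64_bool(uint64_t *Cx, const bool *Ax, int64_t anz, int nthreads) {
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++) {
        bool aij   = Ax[p];             // GB_GETA:    aij = Ax [pA]
        uint64_t x = (uint64_t) aij;    // GB_CASTING: typecast bool -> uint64_t
        Cx[p] = !(x != 0);              // GB_OP:      logical NOT, stored as 0 or 1
    }
}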
laplace_acc.c
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <sys/time.h> #include <openacc.h> #include <omp.h> // grid size #define GRIDY 8192 #define GRIDX 8192 // smallest permitted change in temperature #define MAX_TEMP_ERROR 0.02 double T_new[GRIDX+2][GRIDY+2]; // temperature grid double T[GRIDX+2][GRIDY+2]; // temperature grid from last iteration // initialisation routine void init(); int main(int argc, char *argv[]) { int i, j; // grid indexes int max_iterations; // maximal number of iterations int iteration=1; // iteration double dt=100; // largest change in temperature struct timeval start_time, stop_time, elapsed_time; // timers int num_threads = 1; // number of OpenMP threads int thread_id = 0; // thread ID int num_devices = 1; // number of GPU devices int device_id = 0; // device ID int chunk_size = 0; // grid size per GPU (X direction) int i_start, i_end; // starting and ending index per GPU (X direction) double dt_global; if(argc!=2) { printf("Usage: %s number_of_iterations\n",argv[0]); exit(1); } else { max_iterations=atoi(argv[1]); } gettimeofday(&start_time,NULL); init(); #pragma omp parallel default(shared) firstprivate(num_threads, thread_id, num_devices, device_id, i_start, i_end, chunk_size,dt,iteration,i,j) { num_threads = omp_get_num_threads(); thread_id = omp_get_thread_num(); num_devices = acc_get_num_devices(acc_device_nvidia); device_id = thread_id % num_devices; acc_set_device_num(device_id, acc_device_nvidia); // calculate the chunk size based on number of threads chunk_size=ceil((1.0*GRIDX)/num_threads); // calculate boundaries and process only inner region i_start = thread_id * chunk_size + 1; i_end = i_start + chunk_size - 1; // simulation iterations #pragma acc data copy(T), create(T_new) while ( dt > MAX_TEMP_ERROR && iteration <= max_iterations ) { // main computational kernel, average over neighbours in the grid #pragma acc kernels for(i = i_start; i <= i_end; i++) for(j = 1; j <= GRIDY; j++) T_new[i][j] = 0.25 * (T[i+1][j] + T[i-1][j] + T[i][j+1] + T[i][j-1]); #pragma acc update self(T[i_start:1][1:GRIDY],T[i_end:1][1:GRIDY]) #pragma omp barrier #pragma acc update device(T[(i_start-1):1][1:GRIDY],T[(i_end+1):1][1:GRIDY]) // reset dt dt = 0.0; #pragma omp single dt_global = 0.0; #pragma omp barrier // compute the largest change and copy T_new to T #pragma acc kernels for(i = i_start; i <= i_end; i++){ for(j = 1; j <= GRIDY; j++){ dt = fmax( fabs(T_new[i][j]-T[i][j]), dt); T[i][j] = T_new[i][j]; } } #pragma omp critical dt_global = fmax(dt,dt_global); #pragma omp barrier dt=dt_global; // periodically print largest change #pragma omp master if((iteration % 100) == 0) printf("Iteration %4.0d, dt %f\n",iteration,dt); iteration++; } } gettimeofday(&stop_time,NULL); timersub(&stop_time, &start_time, &elapsed_time); // measure time printf("Total time was %f seconds.\n", elapsed_time.tv_sec+elapsed_time.tv_usec/1000000.0); return 0; } // initialize grid and boundary conditions void init(){ int i,j; for(i = 0; i <= GRIDX+1; i++){ for (j = 0; j <= GRIDY+1; j++){ T[i][j] = 0.0; } } // these boundary conditions never change throughout run // set left side to 0 and right to a linear increase for(i = 0; i <= GRIDX+1; i++) { T[i][0] = 0.0; T[i][GRIDY+1] = (128.0/GRIDX)*i; } // set top to 0 and bottom to linear increase for(j = 0; j <= GRIDY+1; j++) { T[0][j] = 0.0; T[GRIDX+1][j] = (128.0/GRIDY)*j; } }
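The decomposition above gives each OpenMP thread chunk_size = ceil(GRIDX/num_threads) rows starting at row 1 and binds it to GPU device thread_id % num_devices. Note that i_end = i_start + chunk_size - 1 is not clamped, so a thread count that does not divide GRIDX evenly would step past the last interior row; the sketch below shows the same arithmetic with an explicit clamp. It is a hedged variant, not the original code, which presumably assumes an even split such as power-of-two thread counts for GRIDX = 8192.

// Per-thread row decomposition as above, with an explicit clamp added.
#include <cmath>
#include <omp.h>

// Call from inside the parallel region, as in the loop above.
static void thread_row_bounds(int gridx, int *i_start, int *i_end) {
    int num_threads = omp_get_num_threads();
    int thread_id   = omp_get_thread_num();
    int chunk_size  = (int) ceil((1.0 * gridx) / num_threads);

    *i_start = thread_id * chunk_size + 1;        // inner rows start at 1
    *i_end   = *i_start + chunk_size - 1;
    if (*i_end   > gridx) *i_end   = gridx;       // clamp the last chunk
    if (*i_start > gridx) *i_start = gridx + 1;   // surplus threads get an empty range
}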
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 32; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + 
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
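The update in the main loop above applies one centre coefficient and four shells of the six axis neighbours at offsets 1 to 4. The helper below is a sketch of that weighted sum for a single grid point, using the coefficient names from the file; it is for illustration only and is not part of the benchmark.

// 25-point operator at one grid point: centre plus four neighbour shells.
static inline double stencil25(double ***a, int i, int j, int k,
                               double c0, double c1, double c2,
                               double c3, double c4) {
    double shell[5] = { a[i][j][k], 0.0, 0.0, 0.0, 0.0 };
    for (int r = 1; r <= 4; r++) {
        shell[r] = a[i - r][j][k] + a[i + r][j][k]
                 + a[i][j - r][k] + a[i][j + r][k]
                 + a[i][j][k - r] + a[i][j][k + r];
    }
    return c0 * shell[0] + c1 * shell[1] + c2 * shell[2]
         + c3 * shell[3] + c4 * shell[4];
}
// The time update in the main loop is then equivalent to
//   A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k]
//                       + roc2[i][j][k] * stencil25(A[t%2], i, j, k,
//                                                   coef0, coef1, coef2, coef3, coef4);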
laplace_cpu.c
/** CPU Laplace solver using optimized red-black Gauss–Seidel with SOR solver * \file main_cpu.c * * \author Kyle E. Niemeyer * \date 09/21/2012 * * Solves Laplace's equation in 2D (e.g., heat conduction in a rectangular plate) * using the red-black Gauss–Seidel with sucessive overrelaxation (SOR) that has * been "optimized". This means that the red and black kernels only loop over * their respective cells, instead of over all cells and skipping even/odd cells. * * Boundary conditions: * T = 0 at x = 0, x = L, y = 0 * T = TN at y = H */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include "timer.h" /** Problem size along one side; total number of cells is this squared */ #define NUM 2048 /** Double precision */ //#define DOUBLE #ifdef DOUBLE #define Real double #define ZERO 0.0 #define ONE 1.0 #define TWO 2.0 /** SOR relaxation parameter */ const Real omega = 1.85; #else #define Real float #define ZERO 0.0f #define ONE 1.0f #define TWO 2.0f /** SOR relaxation parameter */ const Real omega = 1.85f; #endif // OpenACC #ifdef _OPENACC #include <openacc.h> #endif // OpenMP #ifdef _OPENMP #include <omp.h> #else #define omp_get_max_threads() 1 #endif #if __STDC_VERSION__ < 199901L #define restrict __restrict__ #endif #define SIZE (NUM * NUM) #define SIZET (NUM * NUM/2 + 3*NUM + 4) /** Function to evaluate coefficient matrix and right-hand side vector. * * \param[in] rowmax number of rows * \param[in] colmax number of columns * \param[in] th_cond thermal conductivity * \param[in] dx grid size in x dimension (uniform) * \param[in] dy grid size in y dimension (uniform) * \param[in] width width of plate (z dimension) * \param[in] TN temperature at top boundary * \param[out] aP array of self coefficients * \param[out] aW array of west neighbor coefficients * \param[out] aE array of east neighbor coefficients * \param[out] aS array of south neighbor coefficients * \param[out] aN array of north neighbor coefficients * \param[out] b right-hand side array */ void fill_coeffs (int rowmax, int colmax, Real th_cond, Real dx, Real dy, Real width, Real TN, Real * aP, Real * aW, Real * aE, Real * aS, Real * aN, Real * b) { int col, row; for (col = 0; col < colmax; ++col) { for (row = 0; row < rowmax; ++row) { int ind = col * rowmax + row; b[ind] = ZERO; Real SP = ZERO; if (col == 0) { // left BC: temp = 0 aW[ind] = ZERO; SP = -TWO * th_cond * width * dy / dx; } else { aW[ind] = th_cond * width * dy / dx; } if (col == colmax - 1) { // right BC: temp = 0 aE[ind] = ZERO; SP = -TWO * th_cond * width * dy / dx; } else { aE[ind] = th_cond * width * dy / dx; } if (row == 0) { // bottom BC: temp = 0 aS[ind] = ZERO; SP = -TWO * th_cond * width * dx / dy; } else { aS[ind] = th_cond * width * dx / dy; } if (row == rowmax - 1) { // top BC: temp = TN aN[ind] = ZERO; b[ind] = TWO * th_cond * width * dx * TN / dy; SP = -TWO * th_cond * width * dx / dy; } else { aN[ind] = th_cond * width * dx / dy; } aP[ind] = aW[ind] + aE[ind] + aS[ind] + aN[ind] - SP; } // end for row } // end for col } // end fill_coeffs /////////////////////////////////////////////////////////////////////////////// /** Function to update temperature for red cells * * \param[in] aP array of self coefficients * \param[in] aW array of west neighbor coefficients * \param[in] aE array of east neighbor coefficients * \param[in] aS array of south neighbor coefficients * \param[in] aN array of north neighbor coefficients * \param[in] b right-hand side array * \param[in] temp_black temperatures of black cells, constant in this 
function * \param[inout] temp_red temperatures of red cells * \return norm_L2 summed residuals */ Real red_kernel (const Real *restrict aP, const Real *restrict aW, const Real *restrict aE, const Real *restrict aS, const Real *restrict aN, const Real *restrict b, const Real *restrict temp_black, Real *restrict temp_red) { Real norm_L2 = ZERO; int col, row; // loop over actual cells, skip boundary cells #pragma omp parallel for shared(aP, aW, aE, aS, aN, temp_black, temp_red) \ reduction(+:norm_L2) private(col, row) #pragma acc kernels present(aP[0:SIZE], aW[0:SIZE], aE[0:SIZE], aS[0:SIZE], aN[0:SIZE], b[0:SIZE], temp_red[0:SIZET], temp_black[0:SIZET]) #pragma acc loop independent for (col = 1; col < NUM + 1; ++col) { #pragma acc loop independent for (row = 1; row < (NUM / 2) + 1; ++row) { int ind_red = col * ((NUM / 2) + 2) + row; // local (red) index int ind = 2 * row - (col % 2) - 1 + NUM * (col - 1); // global index Real res = b[ind] + (aW[ind] * temp_black[row + (col - 1) * ((NUM / 2) + 2)] + aE[ind] * temp_black[row + (col + 1) * ((NUM / 2) + 2)] + aS[ind] * temp_black[row - (col % 2) + col * ((NUM / 2) + 2)] + aN[ind] * temp_black[row + ((col + 1) % 2) + col * ((NUM / 2) + 2)]); Real temp_old = temp_red[ind_red]; temp_red[ind_red] = temp_old * (ONE - omega) + omega * (res / aP[ind]); // calculate residual res = temp_red[ind_red] - temp_old; norm_L2 += (res * res); } // end for row } // end for col return norm_L2; } // end red_kernel /////////////////////////////////////////////////////////////////////////////// /** Function to update temperature for black cells * * \param[in] aP array of self coefficients * \param[in] aW array of west neighbor coefficients * \param[in] aE array of east neighbor coefficients * \param[in] aS array of south neighbor coefficients * \param[in] aN array of north neighbor coefficients * \param[in] b right-hand side array * \param[in] temp_red temperatures of red cells, constant in this function * \param[inout] temp_black temperatures of black cells * \return norm_L2 variable holding summed residuals */ Real black_kernel (const Real *restrict aP, const Real *restrict aW, const Real *restrict aE, const Real *restrict aS, const Real *restrict aN, const Real *restrict b, const Real *restrict temp_red, Real *restrict temp_black) { Real norm_L2 = ZERO; int col, row; // loop over actual cells, skip boundary cells #pragma omp parallel for shared(aP, aW, aE, aS, aN, temp_black, temp_red) \ reduction(+:norm_L2) private(col, row) #pragma acc kernels present(aP[0:SIZE], aW[0:SIZE], aE[0:SIZE], aS[0:SIZE], aN[0:SIZE], b[0:SIZE], temp_red[0:SIZET], temp_black[0:SIZET]) #pragma acc loop independent for (col = 1; col < NUM + 1; ++col) { #pragma acc loop independent for (row = 1; row < (NUM / 2) + 1; ++row) { int ind_black = col * ((NUM / 2) + 2) + row; // local (black) index int ind = 2 * row - ((col + 1) % 2) - 1 + NUM * (col - 1); // global index Real res = b[ind] + (aW[ind] * temp_red[row + (col - 1) * ((NUM / 2) + 2)] + aE[ind] * temp_red[row + (col + 1) * ((NUM / 2) + 2)] + aS[ind] * temp_red[row - ((col + 1) % 2) + col * ((NUM / 2) + 2)] + aN[ind] * temp_red[row + (col % 2) + col * ((NUM / 2) + 2)]); Real temp_old = temp_black[ind_black]; temp_black[ind_black] = temp_old * (ONE - omega) + omega * (res / aP[ind]); // calculate residual res = temp_black[ind_black] - temp_old; norm_L2 += (res * res); } // end for row } // end for col return norm_L2; } // end black_kernel /////////////////////////////////////////////////////////////////////////////// /** Main function 
that solves Laplace's equation in 2D (heat conduction in plate) * * Contains iteration loop for red-black Gauss-Seidel with SOR */ int main (void) { // size of plate Real L = 1.0; Real H = 1.0; Real width = 0.01; // thermal conductivity Real th_cond = 1.0; // temperature at top boundary Real TN = 1.0; // SOR iteration tolerance Real tol = 1.e-6; // number of cells in x and y directions // including unused boundary cells int num_rows = (NUM / 2) + 2; int num_cols = NUM + 2; int size_temp = num_rows * num_cols; int size = NUM * NUM; // size of cells Real dx = L / NUM; Real dy = H / NUM; // iterations for Red-Black Gauss-Seidel with SOR int iter; int it_max = 1e6; // allocate memory Real *restrict aP, *restrict aW, *restrict aE, *restrict aS, *restrict aN, *restrict b; Real *restrict temp_red, *restrict temp_black; // arrays of coefficients aP = (Real *) calloc (size, sizeof(Real)); aW = (Real *) calloc (size, sizeof(Real)); aE = (Real *) calloc (size, sizeof(Real)); aS = (Real *) calloc (size, sizeof(Real)); aN = (Real *) calloc (size, sizeof(Real)); // RHS b = (Real *) calloc (size, sizeof(Real)); // temperature arrays for red and black cells temp_red = (Real *) calloc (size_temp, sizeof(Real)); temp_black = (Real *) calloc (size_temp, sizeof(Real)); // set coefficients fill_coeffs (NUM, NUM, th_cond, dx, dy, width, TN, aP, aW, aE, aS, aN, b); int i; for (i = 0; i < size_temp; ++i) { temp_red[i] = ZERO; temp_black[i] = ZERO; } #ifdef _OPENACC // initialize device acc_init(acc_device_nvidia); acc_set_device_num(0, acc_device_nvidia); #endif // print problem info printf("Problem size: %d x %d \n", NUM, NUM); printf("Max threads: %d\n", omp_get_max_threads()); ////////////////////////////// // start timer //clock_t start_time = clock(); StartTimer(); ////////////////////////////// // red-black Gauss-Seidel with SOR iteration loop #pragma acc data copyin(aP[0:SIZE], aW[0:SIZE], aE[0:SIZE], aS[0:SIZE], aN[0:SIZE], b[0:SIZE]) \ copy(temp_red[0:SIZET], temp_black[0:SIZET]) for (iter = 1; iter <= it_max; ++iter) { Real norm_L2 = ZERO; // update red cells norm_L2 += red_kernel (aP, aW, aE, aS, aN, b, temp_black, temp_red); // update black cells norm_L2 += black_kernel (aP, aW, aE, aS, aN, b, temp_red, temp_black); // calculate residual norm_L2 = sqrt(norm_L2 / ((Real)size)); if (iter % 100 == 0) printf("%5d, %0.6f\n", iter, norm_L2); // if tolerance has been reached, end SOR iterations if (norm_L2 < tol) { break; } } ///////////////////////////////// // end timer //clock_t end_time = clock(); double runtime = GetTimer(); ///////////////////////////////// #if defined(_OPENMP) printf("OpenMP\n"); #elif defined(_OPENACC) printf("OpenACC\n"); #else printf("CPU\n"); #endif printf("Iterations: %i\n", iter); //printf("Time: %f\n", (end_time - start_time) / (double)CLOCKS_PER_SEC); printf("Total time: %f s\n", runtime / 1000); // write temperature data to file FILE * pfile; pfile = fopen("temp_cpu.dat", "w"); if (pfile != NULL) { fprintf(pfile, "#x\ty\ttemp(K)\n"); int row, col; for (row = 1; row < NUM + 1; ++row) { for (col = 1; col < NUM + 1; ++col) { Real x_pos = (col - 1) * dx + (dx / 2); Real y_pos = (row - 1) * dy + (dy / 2); if ((row + col) % 2 == 0) { // even, so red cell int ind = col * num_rows + (row + (col % 2)) / 2; fprintf(pfile, "%f\t%f\t%f\n", x_pos, y_pos, temp_red[ind]); } else { // odd, so black cell int ind = col * num_rows + (row + ((col + 1) % 2)) / 2; fprintf(pfile, "%f\t%f\t%f\n", x_pos, y_pos, temp_black[ind]); } } fprintf(pfile, "\n"); } } fclose(pfile); // free memory free(aP); 
free(aW);
free(aE);
free(aS);
free(aN);
free(b);
free(temp_red);
free(temp_black);

return 0;
}
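The red and black kernels above depend on two index formulas: a local index into the compact (NUM/2 + 2) x (NUM + 2) red or black array, and a global index into the NUM x NUM coefficient arrays. The following standalone check is a minimal sketch (using a small hypothetical grid size N in place of NUM, which must be even) that re-creates the global-index formulas from red_kernel and black_kernel and verifies that, taken together, they visit every cell exactly once, with red cells on even (row + col) parity. It is only an illustration of the indexing, not part of the solver.

/* Sketch: verify the red/black global-index formulas used in red_kernel and
 * black_kernel. N is a small stand-in for NUM and must be even. */
#include <stdio.h>
#include <stdlib.h>

#define N 8

int main(void)
{
    int visits[N * N] = {0};

    for (int col = 1; col < N + 1; ++col) {
        for (int row = 1; row < (N / 2) + 1; ++row) {
            /* same expressions as the `ind` computed in the two kernels */
            int red_gidx   = 2 * row - (col % 2) - 1 + N * (col - 1);
            int black_gidx = 2 * row - ((col + 1) % 2) - 1 + N * (col - 1);
            visits[red_gidx]++;
            visits[black_gidx]++;

            /* coefficients are stored column-major (ind = col * NUM + row),
             * so recover global row/col and check red parity */
            int grow = red_gidx % N, gcol = red_gidx / N;
            if ((grow + gcol) % 2 != 0) {
                fprintf(stderr, "parity error at global index %d\n", red_gidx);
                return EXIT_FAILURE;
            }
        }
    }

    for (int i = 0; i < N * N; ++i) {
        if (visits[i] != 1) {
            fprintf(stderr, "cell %d visited %d times\n", i, visits[i]);
            return EXIT_FAILURE;
        }
    }
    printf("red/black index mapping covers all %d cells exactly once\n", N * N);
    return EXIT_SUCCESS;
}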
scalar_product.h
//============================================================================== // // Copyright 2018 The InsideLoop Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //============================================================================== #ifndef IL_SCALAR_PRODUCT_H #define IL_SCALAR_PRODUCT_H #include <il/Array.h> #include <il/benchmark/tools/memory/memory.h> #include <il/benchmark/tools/timer/Benchmark.h> #include <cstdio> #ifdef IL_TBB #include <tbb/tbb.h> #endif #ifdef IL_CILK #include <cilk/cilk.h> #include <cilk/reducer_opadd.h> #endif void scalar_product() { std::printf( "****************************************************************" "****************\n"); std::printf("* Scalar product\n"); std::printf( "****************************************************************" "****************\n"); il::Array<il::int_t> size{ il::value, {100, 1000, 10000, 100000, 1000000, 10000000, 100000000}}; for (il::int_t n : size) { std::printf("Size of array: %td\n", n); auto scalar_product_serial = [&n](il::io_t, il::BState& state) { il::Array<float> v1{n, 0.0}; il::Array<float> v2{n, 0.0}; float sum = 0.0; while (state.keep_running()) { for (il::int_t k = 0; k < v2.size(); ++k) { sum += v1[k] * v2[k]; } } il::do_not_optimize(sum); }; float time_serial{il::benchmark(scalar_product_serial) / n}; std::printf("Serial: %7.3e s\n", time_serial); auto scalar_product_sse = [&n](il::io_t, il::BState& state) { il::Array<float> v1{n, 0.0}; il::Array<float> v2{n, 0.0}; float* v1_data{v1.data()}; float* v2_data{v2.data()}; float sum = 0.0; while (state.keep_running()) { __m128 res; __m128 prd; __m128 ma; __m128 mb; for (std::size_t k = 0; k < n; k += 4) { ma = _mm_loadu_ps(&v1_data[k]); mb = _mm_loadu_ps(&v2_data[k]); prd = _mm_mul_ps(ma, mb); res = _mm_add_ps(prd, res); } prd = _mm_setzero_ps(); res = _mm_hadd_ps(res, prd); res = _mm_hadd_ps(res, prd); _mm_store_ss(&sum, res); } il::do_not_optimize(sum); }; float time_sse{il::benchmark(scalar_product_sse) / n}; std::printf(" SSE: %7.3e s, Ratio: %5.3f\n", time_sse, time_serial / time_sse); #ifdef IL_OPENMP auto scalar_product_openmp = [&n](il::io_t, il::BState& state) { il::Array<float> v1{n, 0.0}; il::Array<float> v2{n, 0.0}; float sum = 0.0; while (state.keep_running()) { #pragma omp parallel for reduction(+ : sum) for (il::int_t k = 0; k < v2.size(); ++k) { sum += v1[k] * v2[k]; } } il::do_not_optimize(sum); }; float time_openmp{il::benchmark(scalar_product_openmp) / n}; std::printf("OpenMP: %7.3e s, Ratio: %5.3f\n", time_openmp, time_serial / time_openmp); #endif #ifdef IL_TBB auto scalar_product_tbb = [&n](il::io_t, il::BState& state) { il::Array<float> v1{n, 0.0}; il::Array<float> v2{n, 0.0}; float sum = 0.0; while (state.keep_running()) { sum += tbb::parallel_reduce( tbb::blocked_range<il::int_t>(0, n), float{0.0}, [=, &v1, &v2](const tbb::blocked_range<il::int_t>& range, float in) -> float { for (il::int_t k{range.begin()}; k < range.end(); ++k) { in += v1[k] * v2[k]; } return in; }, [](float sum1, float sum2) -> 
float { return sum1 + sum2; }); } il::do_not_optimize(sum); }; float time_tbb{il::benchmark(scalar_product_tbb) / n}; std::printf(" TBB: %7.3e s, Ratio: %5.3f\n", time_tbb, time_serial / time_tbb); #endif #ifdef IL_CILK auto scalar_product_cilk = [&n](il::io_t, il::BState& state) { il::Array<float> v1{n, 0.0}; il::Array<float> v2{n, 0.0}; float sum = 0.0; while (state.keep_running()) { cilk::reducer_opadd<float> partial{0.0}; cilk_for(il::int_t k = 0; k < n; ++k) { partial += v1[k] * v2[k]; } sum += partial.get_value(); } il::do_not_optimize(sum); }; float time_cilk{il::benchmark(scalar_product_cilk) / n}; std::printf(" Cilk: %7.3e s, Ratio: %5.3f\n", time_cilk, time_serial / time_cilk); #endif std::printf("\n"); } } #endif // IL_SCALAR_PRODUCT_H
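Two details of the scalar_product_sse lambda above are easy to trip over: the __m128 accumulator `res` is read by _mm_add_ps before it has ever been written, and sizes that are not a multiple of 4 leave a tail unprocessed. The sketch below is a self-contained C version of the same SSE dot product with the accumulator zero-initialized and a scalar tail loop; the function name and test values are illustrative, and it assumes SSE3 (for _mm_hadd_ps, e.g. compile with -msse3).

/* Standalone sketch of the SSE dot product, with a zeroed accumulator and
 * tail handling. Assumes SSE3. */
#include <immintrin.h>
#include <stdio.h>

static float dot_sse(const float *a, const float *b, long n)
{
    __m128 acc = _mm_setzero_ps();           /* start from 0, not garbage */
    long k = 0;
    for (; k + 4 <= n; k += 4) {
        __m128 ma = _mm_loadu_ps(&a[k]);
        __m128 mb = _mm_loadu_ps(&b[k]);
        acc = _mm_add_ps(acc, _mm_mul_ps(ma, mb));
    }
    /* horizontal sum of the four partial sums */
    acc = _mm_hadd_ps(acc, _mm_setzero_ps());
    acc = _mm_hadd_ps(acc, _mm_setzero_ps());
    float sum;
    _mm_store_ss(&sum, acc);
    for (; k < n; ++k)                        /* scalar tail */
        sum += a[k] * b[k];
    return sum;
}

int main(void)
{
    float a[6] = {1, 2, 3, 4, 5, 6};
    float b[6] = {6, 5, 4, 3, 2, 1};
    printf("%f\n", dot_sse(a, b, 6));         /* expected 56.000000 */
    return 0;
}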
DRB054-inneronly2-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: [email protected], [email protected], [email protected], [email protected], [email protected]) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Example with loop-carried data dependence at the outer level loop. The inner level loop can be parallelized. */ int main() { int i,j; int n=100, m=100; double b[n][m]; #pragma omp parallel for private(i ,j ) for(i=0;i<n; i++) #pragma omp parallel for private(j ) for(j=0;j<n; j++) b[i][j]=(double)(i*j); for (i=1;i<n;i++) #pragma omp parallel for private(j ) for (j=1;j<m;j++) b[i][j]=b[i-1][j-1]; for(i=0;i<n; i++) for(j=0;j<n; j++) printf("%lf\n", b[i][j]); return 0; }
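The statement b[i][j] = b[i-1][j-1] carries a dependence of distance (1,1) across the outer i-loop: iteration i reads values written by iteration i-1, so the outer loop must stay serial, while for a fixed i every j reads only row i-1, which is already complete. The small check below is an illustrative sketch (array name and size are hypothetical) that compares a strictly serial run against the inner-only parallel run, which is the pattern the test above exercises.

/* Sketch: inner-only parallelization of b[i][j] = b[i-1][j-1] matches the
 * serial result; parallelizing the outer loop would not. */
#include <stdio.h>
#include <string.h>

#define N 64

int main(void)
{
    static double serial[N][N], par[N][N];

    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            serial[i][j] = par[i][j] = (double)(i * j);

    /* reference: fully serial */
    for (int i = 1; i < N; i++)
        for (int j = 1; j < N; j++)
            serial[i][j] = serial[i - 1][j - 1];

    /* inner loop parallel: for a fixed i, every j only reads row i-1,
     * which was completely written before this i started */
    for (int i = 1; i < N; i++) {
        #pragma omp parallel for
        for (int j = 1; j < N; j++)
            par[i][j] = par[i - 1][j - 1];
    }

    printf("results %s\n",
           memcmp(serial, par, sizeof serial) == 0 ? "match" : "differ");
    return 0;
}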
omp50_taskwait_depend.c
// RUN: %libomp-compile-and-run // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8 // clang does not yet support taskwait with depend clause // clang-12 introduced parsing, but no codegen // TODO: update expected result when codegen in clang is added // icc does not yet support taskwait with depend clause // TODO: update expected result when support for icc is added // XFAIL: clang, icc // REQUIRES: !abt #include <stdio.h> #include <stdlib.h> #include <omp.h> #include "omp_my_sleep.h" int a = 0, b = 0; int task_grabbed = 0, task_can_proceed = 0; int task2_grabbed = 0, task2_can_proceed = 0; static void wait_on_flag(int *flag) { int flag_value; int timelimit = 30; int secs = 0; do { #pragma omp atomic read flag_value = *flag; my_sleep(1.0); secs++; if (secs == timelimit) { fprintf(stderr, "error: timeout in wait_on_flag()\n"); exit(EXIT_FAILURE); } } while (flag_value == 0); } static void signal_flag(int *flag) { #pragma omp atomic (*flag)++; } int main(int argc, char** argv) { // Ensure two threads are running int num_threads = omp_get_max_threads(); if (num_threads < 2) omp_set_num_threads(2); #pragma omp parallel shared(a) { int a_value; // Let us be extra safe here if (omp_get_num_threads() > 1) { #pragma omp single nowait { // Schedule independent child task that // waits to be flagged after sebsequent taskwait depend() #pragma omp task { signal_flag(&task_grabbed); wait_on_flag(&task_can_proceed); } // Let another worker thread grab the task to execute wait_on_flag(&task_grabbed); // This should be ignored since the task above has // no dependency information #pragma omp taskwait depend(inout: a) // Signal the independent task to proceed signal_flag(&task_can_proceed); // Schedule child task with dependencies that taskwait does // not care about #pragma omp task depend(inout: b) { signal_flag(&task2_grabbed); wait_on_flag(&task2_can_proceed); #pragma omp atomic b++; } // Let another worker thread grab the task to execute wait_on_flag(&task2_grabbed); // This should be ignored since the task above has // dependency information on b instead of a #pragma omp taskwait depend(inout: a) // Signal the task to proceed signal_flag(&task2_can_proceed); // Generate one child task for taskwait #pragma omp task shared(a) depend(inout: a) { my_sleep(1.0); #pragma omp atomic a++; } #pragma omp taskwait depend(inout: a) #pragma omp atomic read a_value = a; if (a_value != 1) { fprintf(stderr, "error: dependent task was not executed before " "taskwait finished\n"); exit(EXIT_FAILURE); } } // #pragma omp single } // if (num_threads > 1) } // #pragma omp parallel return EXIT_SUCCESS; }
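Stripped of the flag/sleep machinery, the behaviour the test above relies on is that `taskwait depend` only waits for previously generated sibling tasks whose dependences conflict with the ones listed on the taskwait. The sketch below is a minimal, hedged illustration of that semantics (variable names are illustrative); it requires an OpenMP 5.0 runtime that actually implements taskwait with a depend clause, which is exactly what the XFAIL lines at the top are about.

/* Sketch: taskwait depend(inout: x) waits on the x-task but not the y-task. */
#include <stdio.h>

int main(void)
{
    int x = 0, y = 0;
    #pragma omp parallel
    #pragma omp single
    {
        #pragma omp task depend(inout: x)
        { x = 1; }

        #pragma omp task depend(inout: y)
        {
            #pragma omp atomic write
            y = 1;
        }

        int ysnap;
        #pragma omp taskwait depend(inout: x)   /* waits on the x-task only */
        #pragma omp atomic read
        ysnap = y;
        printf("after taskwait depend: x=%d (must be 1), y=%d (0 or 1)\n",
               x, ysnap);

        #pragma omp taskwait                    /* now wait for everything */
    }
    printf("final: x=%d y=%d\n", x, y);
    return 0;
}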
smg.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision: 2.12 $ ***********************************************************************EHEADER*/ #include "_hypre_struct_ls.h" #include "smg.h" /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ void * hypre_SMGCreate( MPI_Comm comm ) { hypre_SMGData *smg_data; smg_data = hypre_CTAlloc(hypre_SMGData, 1); (smg_data -> comm) = comm; (smg_data -> time_index) = hypre_InitializeTiming("SMG"); /* set defaults */ (smg_data -> memory_use) = 0; (smg_data -> tol) = 1.0e-06; (smg_data -> max_iter) = 200; (smg_data -> rel_change) = 0; (smg_data -> zero_guess) = 0; (smg_data -> max_levels) = 0; (smg_data -> num_pre_relax) = 1; (smg_data -> num_post_relax) = 1; (smg_data -> cdir) = 2; hypre_SetIndex((smg_data -> base_index), 0, 0, 0); hypre_SetIndex((smg_data -> base_stride), 1, 1, 1); (smg_data -> logging) = 0; (smg_data -> print_level) = 0; /* initialize */ (smg_data -> num_levels) = -1; return (void *) smg_data; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SMGDestroy( void *smg_vdata ) { hypre_SMGData *smg_data = smg_vdata; HYPRE_Int l; if (smg_data) { if ((smg_data -> logging) > 0) { hypre_TFree(smg_data -> norms); hypre_TFree(smg_data -> rel_norms); } if ((smg_data -> num_levels) > -1) { for (l = 0; l < ((smg_data -> num_levels) - 1); l++) { hypre_SMGRelaxDestroy(smg_data -> relax_data_l[l]); hypre_SMGResidualDestroy(smg_data -> residual_data_l[l]); hypre_SemiRestrictDestroy(smg_data -> restrict_data_l[l]); hypre_SemiInterpDestroy(smg_data -> interp_data_l[l]); } hypre_SMGRelaxDestroy(smg_data -> relax_data_l[l]); if (l == 0) { hypre_SMGResidualDestroy(smg_data -> residual_data_l[l]); } hypre_TFree(smg_data -> relax_data_l); hypre_TFree(smg_data -> residual_data_l); hypre_TFree(smg_data -> restrict_data_l); hypre_TFree(smg_data -> interp_data_l); hypre_StructVectorDestroy(smg_data -> tb_l[0]); hypre_StructVectorDestroy(smg_data -> tx_l[0]); hypre_StructGridDestroy(smg_data -> grid_l[0]); hypre_StructMatrixDestroy(smg_data -> A_l[0]); hypre_StructVectorDestroy(smg_data -> b_l[0]); hypre_StructVectorDestroy(smg_data -> x_l[0]); for (l = 0; l < ((smg_data -> num_levels) - 1); l++) { hypre_StructGridDestroy(smg_data -> grid_l[l+1]); hypre_StructGridDestroy(smg_data -> PT_grid_l[l+1]); hypre_StructMatrixDestroy(smg_data -> A_l[l+1]); if (smg_data -> PT_l[l] == smg_data -> R_l[l]) { hypre_StructMatrixDestroy(smg_data -> PT_l[l]); } else { hypre_StructMatrixDestroy(smg_data -> PT_l[l]); hypre_StructMatrixDestroy(smg_data -> R_l[l]); } hypre_StructVectorDestroy(smg_data -> b_l[l+1]); hypre_StructVectorDestroy(smg_data -> x_l[l+1]); hypre_StructVectorDestroy(smg_data -> tb_l[l+1]); hypre_StructVectorDestroy(smg_data -> tx_l[l+1]); } hypre_SharedTFree(smg_data -> data); hypre_TFree(smg_data -> grid_l); hypre_TFree(smg_data -> PT_grid_l); hypre_TFree(smg_data -> A_l); hypre_TFree(smg_data -> PT_l); 
hypre_TFree(smg_data -> R_l); hypre_TFree(smg_data -> b_l); hypre_TFree(smg_data -> x_l); hypre_TFree(smg_data -> tb_l); hypre_TFree(smg_data -> tx_l); } hypre_FinalizeTiming(smg_data -> time_index); hypre_TFree(smg_data); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SMGSetMemoryUse( void *smg_vdata, HYPRE_Int memory_use ) { hypre_SMGData *smg_data = smg_vdata; (smg_data -> memory_use) = memory_use; return hypre_error_flag; } HYPRE_Int hypre_SMGGetMemoryUse( void *smg_vdata, HYPRE_Int * memory_use ) { hypre_SMGData *smg_data = smg_vdata; *memory_use = (smg_data -> memory_use); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SMGSetTol( void *smg_vdata, double tol ) { hypre_SMGData *smg_data = smg_vdata; (smg_data -> tol) = tol; return hypre_error_flag; } HYPRE_Int hypre_SMGGetTol( void *smg_vdata, double *tol ) { hypre_SMGData *smg_data = smg_vdata; *tol = (smg_data -> tol); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SMGSetMaxIter( void *smg_vdata, HYPRE_Int max_iter ) { hypre_SMGData *smg_data = smg_vdata; (smg_data -> max_iter) = max_iter; return hypre_error_flag; } HYPRE_Int hypre_SMGGetMaxIter( void *smg_vdata, HYPRE_Int * max_iter ) { hypre_SMGData *smg_data = smg_vdata; *max_iter = (smg_data -> max_iter); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SMGSetRelChange( void *smg_vdata, HYPRE_Int rel_change ) { hypre_SMGData *smg_data = smg_vdata; (smg_data -> rel_change) = rel_change; return hypre_error_flag; } HYPRE_Int hypre_SMGGetRelChange( void *smg_vdata, HYPRE_Int * rel_change ) { hypre_SMGData *smg_data = smg_vdata; *rel_change = (smg_data -> rel_change); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SMGSetZeroGuess( void *smg_vdata, HYPRE_Int zero_guess ) { hypre_SMGData *smg_data = smg_vdata; (smg_data -> zero_guess) = zero_guess; return hypre_error_flag; } HYPRE_Int hypre_SMGGetZeroGuess( void *smg_vdata, HYPRE_Int * zero_guess ) { hypre_SMGData *smg_data = smg_vdata; *zero_guess = (smg_data -> zero_guess); return hypre_error_flag; } /*-------------------------------------------------------------------------- * Note that we require at least 1 pre-relax sweep. 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_SMGSetNumPreRelax( void *smg_vdata, HYPRE_Int num_pre_relax ) { hypre_SMGData *smg_data = smg_vdata; (smg_data -> num_pre_relax) = hypre_max(num_pre_relax,1); return hypre_error_flag; } HYPRE_Int hypre_SMGGetNumPreRelax( void *smg_vdata, HYPRE_Int * num_pre_relax ) { hypre_SMGData *smg_data = smg_vdata; *num_pre_relax = (smg_data -> num_pre_relax); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SMGSetNumPostRelax( void *smg_vdata, HYPRE_Int num_post_relax ) { hypre_SMGData *smg_data = smg_vdata; (smg_data -> num_post_relax) = num_post_relax; return hypre_error_flag; } HYPRE_Int hypre_SMGGetNumPostRelax( void *smg_vdata, HYPRE_Int * num_post_relax ) { hypre_SMGData *smg_data = smg_vdata; *num_post_relax = (smg_data -> num_post_relax); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SMGSetBase( void *smg_vdata, hypre_Index base_index, hypre_Index base_stride ) { hypre_SMGData *smg_data = smg_vdata; HYPRE_Int d; for (d = 0; d < 3; d++) { hypre_IndexD((smg_data -> base_index), d) = hypre_IndexD(base_index, d); hypre_IndexD((smg_data -> base_stride), d) = hypre_IndexD(base_stride, d); } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SMGSetLogging( void *smg_vdata, HYPRE_Int logging) { hypre_SMGData *smg_data = smg_vdata; (smg_data -> logging) = logging; return hypre_error_flag; } HYPRE_Int hypre_SMGGetLogging( void *smg_vdata, HYPRE_Int * logging) { hypre_SMGData *smg_data = smg_vdata; *logging = (smg_data -> logging); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SMGSetPrintLevel( void *smg_vdata, HYPRE_Int print_level) { hypre_SMGData *smg_data = smg_vdata; (smg_data -> print_level) = print_level; return hypre_error_flag; } HYPRE_Int hypre_SMGGetPrintLevel( void *smg_vdata, HYPRE_Int * print_level) { hypre_SMGData *smg_data = smg_vdata; *print_level = (smg_data -> print_level); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SMGGetNumIterations( void *smg_vdata, HYPRE_Int *num_iterations ) { hypre_SMGData *smg_data = smg_vdata; *num_iterations = (smg_data -> num_iterations); return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SMGPrintLogging( void *smg_vdata, HYPRE_Int myid) { hypre_SMGData *smg_data = smg_vdata; HYPRE_Int i; HYPRE_Int num_iterations = (smg_data -> num_iterations); HYPRE_Int logging = (smg_data -> logging); HYPRE_Int print_level = (smg_data -> print_level); double *norms = (smg_data -> norms); double *rel_norms = (smg_data -> rel_norms); if (myid == 0) { if (print_level > 0) { if (logging > 0) { for (i = 0; i < num_iterations; i++) { hypre_printf("Residual norm[%d] = %e ",i,norms[i]); 
hypre_printf("Relative residual norm[%d] = %e\n",i,rel_norms[i]); } } } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SMGGetFinalRelativeResidualNorm( void *smg_vdata, double *relative_residual_norm ) { hypre_SMGData *smg_data = smg_vdata; HYPRE_Int max_iter = (smg_data -> max_iter); HYPRE_Int num_iterations = (smg_data -> num_iterations); HYPRE_Int logging = (smg_data -> logging); double *rel_norms = (smg_data -> rel_norms); if (logging > 0) { if (num_iterations == max_iter) { *relative_residual_norm = rel_norms[num_iterations-1]; } else { *relative_residual_norm = rel_norms[num_iterations]; } } return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SMGSetStructVectorConstantValues( hypre_StructVector *vector, double values, hypre_BoxArray *box_array, hypre_Index stride ) { hypre_Box *v_data_box; HYPRE_Int vi; double *vp; hypre_Box *box; hypre_Index loop_size; hypre_IndexRef start; HYPRE_Int i; /*----------------------------------------------------------------------- * Set the vector coefficients *-----------------------------------------------------------------------*/ hypre_ForBoxI(i, box_array) { box = hypre_BoxArrayBox(box_array, i); start = hypre_BoxIMin(box); v_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(vector), i); vp = hypre_StructVectorBoxData(vector, i); hypre_BoxGetStrideSize(box, stride, loop_size); hypre_BoxLoop1Begin(hypre_StructVectorDim(vector), loop_size, v_data_box, start, stride, vi); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,vi) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop1For(vi) { vp[vi] = values; } hypre_BoxLoop1End(vi); } return hypre_error_flag; }
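The file above only defines the SMG solver object and its accessors. As a usage sketch, a driver would typically string them together as below; the setup and solve entry points (hypre_SMGSetup / hypre_SMGSolve) are assumed to be provided by the other SMG source files, and A, b, x are assumed to be already-assembled hypre_StructMatrix / hypre_StructVector objects, so this is not a complete, runnable driver on its own.

/* Usage sketch for the SMG accessor interface defined above. */
#include "_hypre_struct_ls.h"

void smg_solve_sketch( MPI_Comm comm,
                       hypre_StructMatrix *A,
                       hypre_StructVector *b,
                       hypre_StructVector *x )
{
   void      *smg = hypre_SMGCreate(comm);
   HYPRE_Int  num_iterations;
   double     rel_norm;

   hypre_SMGSetTol(smg, 1.0e-8);         /* tighter than the 1.0e-06 default */
   hypre_SMGSetMaxIter(smg, 50);
   hypre_SMGSetNumPreRelax(smg, 1);      /* clipped to >= 1 internally */
   hypre_SMGSetNumPostRelax(smg, 1);
   hypre_SMGSetLogging(smg, 1);          /* needed so rel_norms is recorded */

   hypre_SMGSetup(smg, A, b, x);         /* assumed: defined in smg_setup.c */
   hypre_SMGSolve(smg, A, b, x);         /* assumed: defined in smg_solve.c */

   hypre_SMGGetNumIterations(smg, &num_iterations);
   hypre_SMGGetFinalRelativeResidualNorm(smg, &rel_norm);
   hypre_printf("SMG: %d iterations, final relative norm %e\n",
                num_iterations, rel_norm);

   hypre_SMGDestroy(smg);
}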
for-4.c
/* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-ompexp" } */

extern void bar(int);

void foo (int n)
{
  int i;
#pragma omp for schedule(dynamic)
  for (i = 0; i < n; ++i)
    bar(i);
}

/* { dg-final { scan-tree-dump-times "GOMP_loop_dynamic_start" 1 "ompexp" } } */
/* { dg-final { scan-tree-dump-times "GOMP_loop_dynamic_next" 1 "ompexp" } } */
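The two scan-tree-dump-times checks count one call to GOMP_loop_dynamic_start and one to GOMP_loop_dynamic_next in the ompexp dump. The sketch below is a hand-written approximation of what that lowering looks like: a single _start call to claim the first chunk, then a do/while driven by _next. The extern declarations are assumptions based on libgomp's loop entry points, not copied from the actual dump, and the real generated code differs in detail.

/* Approximate shape of the ompexp lowering of the dynamic-schedule loop. */
extern _Bool GOMP_loop_dynamic_start (long start, long end, long incr,
                                      long chunk_size, long *istart, long *iend);
extern _Bool GOMP_loop_dynamic_next (long *istart, long *iend);
extern void  GOMP_loop_end (void);
extern void  bar (int);

static void foo_lowered_sketch (int n)
{
  long istart, iend;
  /* exactly one _start call ... */
  if (GOMP_loop_dynamic_start (0, n, 1, 1, &istart, &iend))
    do
      {
        for (long i = istart; i < iend; ++i)
          bar ((int) i);
      }
    /* ... and one _next call site, matching the dump counts */
    while (GOMP_loop_dynamic_next (&istart, &iend));
  GOMP_loop_end ();
}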
c_mandel.c
/* *********************************************************************** This program is part of the OpenMP Source Code Repository http://www.pcg.ull.es/ompscr/ e-mail: [email protected] This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License (LICENSE file) along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA FILE: c_mandel.c VERSION: 1.0 DATE: May 2004 AUTHOR: F. de Sande COMMENTS TO: [email protected] DESCRIPTION: This program computes an estimation to the Mandelbrot Set area using MonteCarlo sampling. The best known estimate so far is 1.50659177 +- 0.00000008. COMMENTS: The Mandelbrot set is a fractal that is defined as the set of points c in the complex plane for which the sequence z_{n+1} = z_n^2 + c with z_0 = 0 does not tend to infinity. The area of the Mandelbrot set is an open question that has been discussed in the recent past. It is not easy to obtain an accurate analytical estimate, and therefore statistical methods have been used. The program explores the rectangle ranging over (-2.0, 0.5) in the real axis and (0.0, 1.125) in the imaginary axis. This rectangle covers the top half of the Mandelbrot Set. REFERENCES: http://www.fractalus.com/kerry/articles/area/mandelbrot-area.html http://www.matthiasbook.de/papers/parallelfractals/mandelbrot.html http://www.mrob.com/pub/muency/areaofthemandelbrotset.html http://en.wikipedia.org/wiki/Mandelbrot_set BASIC PRAGMAS: parallel for USAGE: ./c_mandel.par 8192 INPUT: The number of points to explore OUTPUT: An estimation of the area and the error. FILE FORMATS: - RESTRICTIONS: - REVISION HISTORY: **************************************************************************/ #include "OmpSCR.h" #include <math.h> #define OSCR_RAND_MAX 2147483647 #define MAXITER 100000 #define DEFAULT_NPOINTS 4092 #define THRESOLD 2.0 #define NUM_ARGS 1 #define NUM_TIMERS 1 typedef struct { double re, im; } complex; int NPOINTS; /* Total no. of points */ complex *points; /* ----------------------------------------------------------------------- IMPLEMENTATION * ----------------------------------------------------------------------- */ int main(int argc, char **argv) { int i, j, NUMTHREADS; long inside, /* no. of points inside the Mandelbrot set */ outside; /* no. of points outside the Mandelbrot set */ double area, error, ztemp, total_time; complex z; char *PARAM_NAMES[NUM_ARGS] = {"Number of points"}; char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time"}; char *DEFAULT_VALUES[NUM_ARGS] = {"4092"}; NUMTHREADS = omp_get_max_threads(); OSCR_init (NUMTHREADS, "Mandelbrot set area", "Use 'mandel' <Number of points>", NUM_ARGS, PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES, argc, argv); NPOINTS = OSCR_getarg_int(1); /* Default: DEFAULT_NPOINTS */ points = (complex *)OSCR_calloc(NPOINTS, sizeof(complex)); NUMTHREADS = omp_get_max_threads(); /*1. 
Generate NPOINTS random points in the complex plane */ srandom(31416); for (i = 0; i < NPOINTS; i++) { points[i].re = -2.0 + 2.5 * random() / OSCR_RAND_MAX; points[i].im = 1.125 * random() / OSCR_RAND_MAX; } /* * 2. Monte Carlo sampling * 2a. Outer loop runs over NPOINTS, initialise z=c * 2b. Inner loop has the iteration z=z*z+c, and threshold test */ OSCR_timer_start(0); outside = 0; #pragma omp parallel for default(none) reduction(+:outside) \ private(i, j, ztemp, z) shared(NPOINTS, points) for(i = 0; i < NPOINTS; i++) { z.re = points[i].re; z.im = points[i].im; for (j = 0; j < MAXITER; j++) { ztemp = (z.re * z.re) - (z.im * z.im) + points[i].re; z.im = z.re * z.im * 2 + points[i].im; z.re = ztemp; if (z.re * z.re + z.im * z.im > THRESOLD) { outside++; break; } } /* for j */ } /* for i */ inside = (long)NPOINTS - outside; /*3. Calculate area and error */ /* The area is proportional to 2 * the area of the rectangle * no. of points inside it */ /* The error is inversely proportional to the square root of the number of test cases */ area = 2.0 * (2.5 * 1.125) * inside / NPOINTS; error = area / sqrt(NPOINTS); OSCR_timer_stop(0); total_time = OSCR_timer_read(0); /* 4. Output the Results */ OSCR_report(1, TIMERS_NAMES); printf("\n \t# THREADS NPOINTS AREA \t\t\tERROR \t\tTIME (secs.)\n"); printf("\t%d \t%d \t%16.12f %16.12f \t%lf\n", NUMTHREADS, NPOINTS, area, error, total_time); return 0; } /* * vim:ts=2:sw=2: */
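The area and error formulas in step 3 follow from the sampling rectangle: it spans 2.5 in the real direction and 1.125 in the imaginary direction (area 2.8125) and covers only the top half of the set, hence the factor of 2. The snippet below works that arithmetic through with hypothetical counts, just to make the expected magnitudes concrete.

/* Worked example of the area/error estimate; the counts are hypothetical. */
#include <stdio.h>
#include <math.h>

int main(void)
{
    long npoints = 100000, inside = 26790;           /* hypothetical counts */
    double area  = 2.0 * (2.5 * 1.125) * inside / npoints;
    double error = area / sqrt((double)npoints);
    printf("area ~= %.6f +/- %.6f\n", area, error);  /* ~1.5069 +/- 0.0048 */
    return 0;
}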
tool_not_available.c
// The OpenMP standard defines 3 ways of providing ompt_start_tool: // 1. "statically-linking the tool’s definition of ompt_start_tool into an OpenMP application" // RUN: %libomp-compile -DCODE -DTOOL && %libomp-run | FileCheck %s // Note: We should compile the tool without -fopenmp as other tools developer // would do. Otherwise this test may pass for the wrong reasons on Darwin. // RUN: %clang %flags -DTOOL -shared -fPIC %s -o %T/tool.so // 2. "introducing a dynamically-linked library that includes the tool’s definition of ompt_start_tool into the application’s address space" // 2.1 Link with tool during compilation // RUN: %libomp-compile -DCODE -Wl,--no-as-needed %T/tool.so && %libomp-run | FileCheck %s // 2.2 Link with tool during compilation, but AFTER the runtime // RUN: %libomp-compile -DCODE -lomp -Wl,--no-as-needed %T/tool.so && %libomp-run | FileCheck %s // 2.3 Inject tool via the dynamic loader // RUN: %libomp-compile -DCODE && %preload-tool %libomp-run | FileCheck %s // 3. "providing the name of a dynamically-linked library appropriate for the architecture and operating system used by the application in the tool-libraries-var ICV" // RUN: %libomp-compile -DCODE && env OMP_TOOL_LIBRARIES=%T/tool.so %libomp-run | FileCheck %s // REQUIRES: ompt /* * This file contains code for an OMPT shared library tool to be * loaded and the code for the OpenMP executable. * -DTOOL enables the code for the tool during compilation * -DCODE enables the code for the executable during compilation */ #ifdef CODE #include "stdio.h" #include "omp.h" #include "ompt.h" int main() { #pragma omp parallel num_threads(2) { #pragma omp master { int result = omp_control_tool(omp_control_tool_start, 0, NULL); printf("0: control_tool()=%d\n", result); } } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback // CHECK: {{^}}0: Do not initialize tool // CHECK: {{^}}0: control_tool()=-2 return 0; } #endif /* CODE */ #ifdef TOOL #include <ompt.h> #include "stdio.h" ompt_start_tool_result_t* ompt_start_tool( unsigned int omp_version, const char *runtime_version) { printf("0: Do not initialize tool\n"); return NULL; } #endif /* TOOL */
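For contrast with the tool above, which deliberately declines to register by returning NULL, the sketch below shows the complementary case: an ompt_start_tool that does register by returning a non-NULL result. The field layout (initialize, finalize, tool_data) and the initializer/finalizer signatures follow the OpenMP 5.0 ompt_start_tool_result_t as declared in the same ompt.h header included above; the printed messages are illustrative, not something this test checks for.

/* Sketch of a tool that registers instead of declining. */
#include <stdio.h>
#include <ompt.h>

static int my_initialize(ompt_function_lookup_t lookup, int initial_device_num,
                         ompt_data_t *tool_data) {
  printf("0: tool initialized (initial device %d)\n", initial_device_num);
  return 1;  /* non-zero: keep the tool active */
}

static void my_finalize(ompt_data_t *tool_data) {
  printf("0: tool finalized\n");
}

ompt_start_tool_result_t *ompt_start_tool(unsigned int omp_version,
                                          const char *runtime_version) {
  static ompt_start_tool_result_t result = {&my_initialize, &my_finalize, {0}};
  printf("0: registering tool with runtime %s\n", runtime_version);
  return &result;
}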
omp_parallel_sections_lastprivate.c
<ompts:test> <ompts:testdescription>Test which checks the omp parallel sections lastprivate directive.</ompts:testdescription> <ompts:ompversion>2.0</ompts:ompversion> <ompts:directive>omp parallel sections lastprivate</ompts:directive> <ompts:dependences>omp critical,omp parallel sections private</ompts:dependences> <ompts:testcode> #include <stdio.h> #include "omp_testsuite.h" int <ompts:testcode:functionname>omp_parallel_sections_lastprivate</ompts:testcode:functionname>(FILE * logFile){ <ompts:orphan:vars> int sum; int sum0; int i; int i0; </ompts:orphan:vars> int known_sum; sum =0; sum0 = 0; i0 = -1; <ompts:orphan> #pragma omp parallel sections private(i,sum0) <ompts:check>lastprivate(i0)</ompts:check><ompts:crosscheck>private(i0)</ompts:crosscheck> { #pragma omp section { sum0=0; for (i=1;i<400;i++) { sum0=sum0+i; i0=i; } #pragma omp critical { sum= sum+sum0; } /*end of critical*/ }/* end of section */ #pragma omp section { sum0=0; for(i=400;i<700;i++) { sum0=sum0+i; /*end of for*/ i0=i; } #pragma omp critical { sum= sum+sum0; } /*end of critical*/ } #pragma omp section { sum0=0; for(i=700;i<1000;i++) { sum0=sum0+i; i0=i; } #pragma omp critical { sum= sum+sum0; } /*end of critical*/ } }/* end of parallel sections*/ </ompts:orphan> known_sum=(999*1000)/2; return ((known_sum==sum) && (i0==999) ); } </ompts:testcode> </ompts:test>
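Outside the ompts test-suite template, the property being checked is simply that lastprivate on a parallel sections construct gives the variable the value assigned in the lexically (sequentially) last section, no matter which thread executed it. The stripped-down sketch below illustrates that with illustrative values.

/* Sketch: lastprivate with parallel sections takes the value from the
 * lexically last section. Compile with -fopenmp. */
#include <stdio.h>

int main(void)
{
    int i0 = -1;
    #pragma omp parallel sections lastprivate(i0)
    {
        #pragma omp section
        { i0 = 10; }
        #pragma omp section
        { i0 = 20; }
        #pragma omp section
        { i0 = 30; }   /* lexically last section determines i0 */
    }
    printf("i0 = %d (expected 30)\n", i0);
    return 0;
}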
pfmg2_setup_rap.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ #include "_hypre_struct_ls.h" #include "pfmg.h" /*-------------------------------------------------------------------------- * Macro to "change coordinates". This routine is written as though * coarsening is being done in the y-direction. This macro is used to * allow for coarsening to be done in the x-direction also. *--------------------------------------------------------------------------*/ #define MapIndex(in_index, cdir, out_index) \ hypre_IndexD(out_index, 2) = hypre_IndexD(in_index, 2); \ hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 1); \ cdir = (cdir + 1) % 2; \ hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 0); \ cdir = (cdir + 1) % 2; /*-------------------------------------------------------------------------- * Sets up new coarse grid operator stucture. *--------------------------------------------------------------------------*/ hypre_StructMatrix * hypre_PFMG2CreateRAPOp( hypre_StructMatrix *R, hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructGrid *coarse_grid, HYPRE_Int cdir ) { hypre_StructMatrix *RAP; hypre_Index *RAP_stencil_shape; hypre_StructStencil *RAP_stencil; HYPRE_Int RAP_stencil_size; HYPRE_Int RAP_stencil_dim; HYPRE_Int RAP_num_ghost[] = {1, 1, 1, 1, 1, 1}; hypre_Index index_temp; HYPRE_Int j, i; HYPRE_Int stencil_rank; RAP_stencil_dim = 2; /*----------------------------------------------------------------------- * Define RAP_stencil *-----------------------------------------------------------------------*/ stencil_rank = 0; /*----------------------------------------------------------------------- * non-symmetric case *-----------------------------------------------------------------------*/ if (!hypre_StructMatrixSymmetric(A)) { /*-------------------------------------------------------------------- * 5 or 9 point fine grid stencil produces 9 point RAP *--------------------------------------------------------------------*/ RAP_stencil_size = 9; RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size); for (j = -1; j < 2; j++) { for (i = -1; i < 2; i++) { /*-------------------------------------------------------------- * Storage for 9 elements (c,w,e,n,s,sw,se,nw,se) *--------------------------------------------------------------*/ hypre_SetIndex3(index_temp,i,j,0); MapIndex(index_temp, cdir, RAP_stencil_shape[stencil_rank]); stencil_rank++; } } } /*----------------------------------------------------------------------- * symmetric case *-----------------------------------------------------------------------*/ else { /*-------------------------------------------------------------------- * 5 or 9 point fine grid stencil produces 9 point RAP * Only store the lower triangular part + diagonal = 5 entries, * lower triangular means the lower triangular part on the matrix * in the standard lexicographic ordering. 
*--------------------------------------------------------------------*/ RAP_stencil_size = 5; RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size); for (j = -1; j < 1; j++) { for (i = -1; i < 2; i++) { /*-------------------------------------------------------------- * Store 5 elements in (c,w,s,sw,se) *--------------------------------------------------------------*/ if( i+j <=0 ) { hypre_SetIndex3(index_temp,i,j,0); MapIndex(index_temp, cdir, RAP_stencil_shape[stencil_rank]); stencil_rank++; } } } } RAP_stencil = hypre_StructStencilCreate(RAP_stencil_dim, RAP_stencil_size, RAP_stencil_shape); RAP = hypre_StructMatrixCreate(hypre_StructMatrixComm(A), coarse_grid, RAP_stencil); hypre_StructStencilDestroy(RAP_stencil); /*----------------------------------------------------------------------- * Coarse operator in symmetric iff fine operator is *-----------------------------------------------------------------------*/ hypre_StructMatrixSymmetric(RAP) = hypre_StructMatrixSymmetric(A); /*----------------------------------------------------------------------- * Set number of ghost points - one one each boundary *-----------------------------------------------------------------------*/ hypre_StructMatrixSetNumGhost(RAP, RAP_num_ghost); return RAP; } /*-------------------------------------------------------------------------- * Routines to build RAP. These routines are fairly general * 1) No assumptions about symmetry of A * 2) No assumption that R = transpose(P) * 3) 5 or 9-point fine grid A * * I am, however, assuming that the c-to-c interpolation is the identity. * * I've written two routines - hypre_PFMG2BuildRAPSym to build the * lower triangular part of RAP (including the diagonal) and * hypre_PFMG2BuildRAPNoSym to build the upper triangular part of RAP * (excluding the diagonal). So using symmetric storage, only the * first routine would be called. With full storage both would need to * be called. * *--------------------------------------------------------------------------*/ HYPRE_Int hypre_PFMG2BuildRAPSym( hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R, HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride, hypre_StructMatrix *RAP ) { hypre_StructStencil *fine_stencil; HYPRE_Int fine_stencil_size; hypre_StructGrid *fgrid; HYPRE_Int *fgrid_ids; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; HYPRE_Int *cgrid_ids; HYPRE_Int constant_coefficient; HYPRE_Int constant_coefficient_A; HYPRE_Int fi, ci; fine_stencil = hypre_StructMatrixStencil(A); fine_stencil_size = hypre_StructStencilSize(fine_stencil); fgrid = hypre_StructMatrixGrid(A); fgrid_ids = hypre_StructGridIDs(fgrid); cgrid = hypre_StructMatrixGrid(RAP); cgrid_boxes = hypre_StructGridBoxes(cgrid); cgrid_ids = hypre_StructGridIDs(cgrid); constant_coefficient = hypre_StructMatrixConstantCoefficient(RAP); constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A); hypre_assert( constant_coefficient==0 || constant_coefficient==1 ); hypre_assert( hypre_StructMatrixConstantCoefficient(R) == constant_coefficient ); hypre_assert( hypre_StructMatrixConstantCoefficient(P) == constant_coefficient ); if (constant_coefficient==1 ) { hypre_assert( constant_coefficient_A==1 ); } else { hypre_assert( constant_coefficient_A==0 || constant_coefficient_A==2 ); } fi = 0; hypre_ForBoxI(ci, cgrid_boxes) { while (fgrid_ids[fi] != cgrid_ids[ci]) { fi++; } /*----------------------------------------------------------------- * Switch statement to direct control to apropriate BoxLoop depending * on stencil size. 
Default is full 9-point. *-----------------------------------------------------------------*/ switch (fine_stencil_size) { /*-------------------------------------------------------------- * Loop for symmetric 5-point fine grid operator; produces a * symmetric 9-point coarse grid operator. We calculate only the * lower triangular stencil entries: (southwest, south, southeast, * west, and center). *--------------------------------------------------------------*/ case 5: if ( constant_coefficient==1 ) { hypre_PFMG2BuildRAPSym_onebox_FSS5_CC1( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } else { hypre_PFMG2BuildRAPSym_onebox_FSS5_CC0( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } break; /*-------------------------------------------------------------- * Loop for symmetric 9-point fine grid operator; produces a * symmetric 9-point coarse grid operator. We calculate only the * lower triangular stencil entries: (southwest, south, southeast, * west, and center). *--------------------------------------------------------------*/ default: if ( constant_coefficient==1 ) { hypre_PFMG2BuildRAPSym_onebox_FSS9_CC1( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } else { hypre_PFMG2BuildRAPSym_onebox_FSS9_CC0( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } break; } /* end switch statement */ } /* end ForBoxI */ return hypre_error_flag; } /* for fine stencil size 5, constant coefficient 0 */ HYPRE_Int hypre_PFMG2BuildRAPSym_onebox_FSS5_CC0( HYPRE_Int ci, HYPRE_Int fi, hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R, HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride, hypre_StructMatrix *RAP ) { hypre_Index index; hypre_Index index_temp; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_IndexRef cstart; hypre_Index stridec; hypre_Index fstart; hypre_IndexRef stridef; hypre_Index loop_size; HYPRE_Int constant_coefficient_A; hypre_Box *A_dbox; hypre_Box *P_dbox; hypre_Box *R_dbox; hypre_Box *RAP_dbox; hypre_Box *cgrid_box; HYPRE_Real *pa, *pb; HYPRE_Real *ra, *rb; HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn; HYPRE_Real a_cw_offd, a_cw_offdm1, a_cw_offdp1, a_ce_offdm1; HYPRE_Real a_cs_offd, a_cs_offdm1, a_cs_offdp1, a_cn_offd, a_cn_offdm1; HYPRE_Real *rap_cc, *rap_cw, *rap_cs; HYPRE_Real *rap_csw, *rap_cse; HYPRE_Int iA, iAm1, iAp1; HYPRE_Int iA_offd, iA_offdm1, iA_offdp1; HYPRE_Int iAc; HYPRE_Int iP, iP1; HYPRE_Int iR; HYPRE_Int yOffsetA, yOffsetA_diag, yOffsetA_offd; HYPRE_Int xOffsetP; HYPRE_Int yOffsetP; stridef = cstride; hypre_SetIndex3(stridec, 1, 1, 1); cgrid = hypre_StructMatrixGrid(RAP); cgrid_boxes = hypre_StructGridBoxes(cgrid); constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A); cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci); cstart = hypre_BoxIMin(cgrid_box); hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart); A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi); P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi); R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi); RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci); /*----------------------------------------------------------------- * Extract pointers for interpolation operator: * pa is pointer for weight for f-point above c-point * pb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index); hypre_SetIndex3(index_temp,0,1,0); 
MapIndex(index_temp, cdir, index); pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) - hypre_BoxOffsetDistance(P_dbox, index); /*----------------------------------------------------------------- * Extract pointers for restriction operator: * ra is pointer for weight for f-point above c-point * rb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) - hypre_BoxOffsetDistance(R_dbox, index); /*----------------------------------------------------------------- * Extract pointers for 5-point fine grid operator: * * a_cc is pointer for center coefficient * a_cw is pointer for west coefficient * a_ce is pointer for east coefficient * a_cs is pointer for south coefficient * a_cn is pointer for north coefficient *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract pointers for coarse grid operator - always 9-point: * * We build only the lower triangular part (plus diagonal). * * rap_cc is pointer for center coefficient (etc.) *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,-1,0); MapIndex(index_temp, cdir, index); rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Define offsets for fine grid stencil and interpolation * * In the BoxLoop below I assume iA and iP refer to data associated * with the point which we are building the stencil for. The below * Offsets are used in refering to data associated with other points. 
*-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); yOffsetP = hypre_BoxOffsetDistance(P_dbox,index); if ( constant_coefficient_A == 0 ) { yOffsetA = hypre_BoxOffsetDistance(A_dbox,index); } else { yOffsetA_offd = 0; yOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index); } hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); xOffsetP = hypre_BoxOffsetDistance(P_dbox,index); /*-------------------------------------------------------------- * Loop for symmetric 5-point fine grid operator; produces a * symmetric 9-point coarse grid operator. We calculate only the * lower triangular stencil entries: (southwest, south, southeast, * west, and center). *--------------------------------------------------------------*/ hypre_BoxGetSize(cgrid_box, loop_size); if ( constant_coefficient_A == 0 ) { hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size, P_dbox, cstart, stridec, iP, R_dbox, cstart, stridec, iR, A_dbox, fstart, stridef, iA, RAP_dbox, cstart, stridec, iAc); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop4For(iP, iR, iA, iAc) { iAm1 = iA - yOffsetA; iAp1 = iA + yOffsetA; iP1 = iP - yOffsetP - xOffsetP; rap_csw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1]; iP1 = iP - yOffsetP; rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1] + rb[iR] * a_cs[iAm1] + a_cs[iA] * pa[iP1]; iP1 = iP - yOffsetP + xOffsetP; rap_cse[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1]; iP1 = iP - xOffsetP; rap_cw[iAc] = a_cw[iA] + rb[iR] * a_cw[iAm1] * pb[iP1] + ra[iR] * a_cw[iAp1] * pa[iP1]; rap_cc[iAc] = a_cc[iA] + rb[iR] * a_cc[iAm1] * pb[iP] + ra[iR] * a_cc[iAp1] * pa[iP] + rb[iR] * a_cn[iAm1] + ra[iR] * a_cs[iAp1] + a_cs[iA] * pb[iP] + a_cn[iA] * pa[iP]; } hypre_BoxLoop4End(iP, iR, iA, iAc); } else { iA_offd = 0; iA_offdm1 = iA_offd - yOffsetA_offd; iA_offdp1 = iA_offd + yOffsetA_offd; a_cn_offd = a_cn[iA_offd]; a_cn_offdm1 = a_cn[iA_offdm1]; a_cs_offd = a_cs[iA_offd]; a_cs_offdm1 = a_cs[iA_offdm1]; a_cs_offdp1 = a_cs[iA_offdp1]; a_cw_offd = a_cw[iA_offd]; a_cw_offdp1 = a_cw[iA_offdp1]; a_cw_offdm1 = a_cw[iA_offdm1]; a_ce_offdm1 = a_ce[iA_offdm1]; hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size, P_dbox, cstart, stridec, iP, R_dbox, cstart, stridec, iR, A_dbox, fstart, stridef, iA, RAP_dbox, cstart, stridec, iAc); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop4For(iP, iR, iA, iAc) { iAm1 = iA - yOffsetA_diag; iAp1 = iA + yOffsetA_diag; iP1 = iP - yOffsetP - xOffsetP; rap_csw[iAc] = rb[iR] * a_cw_offdm1 * pa[iP1]; iP1 = iP - yOffsetP; rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1] + rb[iR] * a_cs_offdm1 + a_cs_offd * pa[iP1]; iP1 = iP - yOffsetP + xOffsetP; rap_cse[iAc] = rb[iR] * a_ce_offdm1 * pa[iP1]; iP1 = iP - xOffsetP; rap_cw[iAc] = a_cw_offd + rb[iR] * a_cw_offdm1 * pb[iP1] + ra[iR] * a_cw_offdp1 * pa[iP1]; rap_cc[iAc] = a_cc[iA] + rb[iR] * a_cc[iAm1] * pb[iP] + ra[iR] * a_cc[iAp1] * pa[iP] + rb[iR] * a_cn_offdm1 + ra[iR] * a_cs_offdp1 + a_cs_offd * pb[iP] + a_cn_offd * pa[iP]; } hypre_BoxLoop4End(iP, iR, iA, iAc); } /* } *//* end ForBoxI */ return hypre_error_flag; } /* for fine stencil size 5, constant coefficient 1 */ HYPRE_Int hypre_PFMG2BuildRAPSym_onebox_FSS5_CC1( HYPRE_Int ci, HYPRE_Int fi, hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R, HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride, 
hypre_StructMatrix *RAP ) { hypre_Index index; hypre_Index index_temp; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_Box *cgrid_box; hypre_IndexRef cstart; hypre_Index fstart; HYPRE_Real *pa, *pb; HYPRE_Real *ra, *rb; HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn; HYPRE_Real *rap_cc, *rap_cw, *rap_cs; HYPRE_Real *rap_csw, *rap_cse; HYPRE_Int iA, iAm1, iAp1; HYPRE_Int iAc; HYPRE_Int iP, iP1; HYPRE_Int iR; HYPRE_Int yOffsetA; HYPRE_Int xOffsetP; HYPRE_Int yOffsetP; cgrid = hypre_StructMatrixGrid(RAP); cgrid_boxes = hypre_StructGridBoxes(cgrid); cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci); cstart = hypre_BoxIMin(cgrid_box); hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart); /*----------------------------------------------------------------- * Extract pointers for interpolation operator: * pa is pointer for weight for f-point above c-point * pb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index); /*----------------------------------------------------------------- * Extract pointers for restriction operator: * ra is pointer for weight for f-point above c-point * rb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index); /*----------------------------------------------------------------- * Extract pointers for 5-point fine grid operator: * * a_cc is pointer for center coefficient * a_cw is pointer for west coefficient * a_ce is pointer for east coefficient * a_cs is pointer for south coefficient * a_cn is pointer for north coefficient *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract pointers for coarse grid operator - always 9-point: * * We build only the lower triangular part (plus diagonal). * * rap_cc is pointer for center coefficient (etc.) 
*-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,-1,0); MapIndex(index_temp, cdir, index); rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Define offsets for fine grid stencil and interpolation * * In the BoxLoop below I assume iA and iP refer to data associated * with the point which we are building the stencil for. The below * Offsets are used in refering to data associated with other points. *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); yOffsetA = 0; yOffsetP = 0; hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); xOffsetP = 0; /*----------------------------------------------------------------- * Switch statement to direct control to apropriate BoxLoop depending * on stencil size. Default is full 9-point. *-----------------------------------------------------------------*/ /*-------------------------------------------------------------- * Loop for symmetric 5-point fine grid operator; produces a * symmetric 9-point coarse grid operator. We calculate only the * lower triangular stencil entries: (southwest, south, southeast, * west, and center). 
*--------------------------------------------------------------*/ iP = 0; iR = 0; iA = 0; iAc = 0; iAm1 = iA - yOffsetA; iAp1 = iA + yOffsetA; iP1 = iP - yOffsetP - xOffsetP; rap_csw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1]; iP1 = iP - yOffsetP; rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1] + rb[iR] * a_cs[iAm1] + a_cs[iA] * pa[iP1]; iP1 = iP - yOffsetP + xOffsetP; rap_cse[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1]; iP1 = iP - xOffsetP; rap_cw[iAc] = a_cw[iA] + rb[iR] * a_cw[iAm1] * pb[iP1] + ra[iR] * a_cw[iAp1] * pa[iP1]; rap_cc[iAc] = a_cc[iA] + rb[iR] * a_cc[iAm1] * pb[iP] + ra[iR] * a_cc[iAp1] * pa[iP] + rb[iR] * a_cn[iAm1] + ra[iR] * a_cs[iAp1] + a_cs[iA] * pb[iP] + a_cn[iA] * pa[iP]; /* } *//* end ForBoxI */ return hypre_error_flag; } /* for fine stencil size 9, constant coefficient 0 */ HYPRE_Int hypre_PFMG2BuildRAPSym_onebox_FSS9_CC0( HYPRE_Int ci, HYPRE_Int fi, hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R, HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride, hypre_StructMatrix *RAP ) { hypre_Index index; hypre_Index index_temp; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_Box *cgrid_box; hypre_IndexRef cstart; hypre_Index stridec; hypre_Index fstart; hypre_IndexRef stridef; hypre_Index loop_size; HYPRE_Int constant_coefficient_A; hypre_Box *A_dbox; hypre_Box *P_dbox; hypre_Box *R_dbox; hypre_Box *RAP_dbox; HYPRE_Real *pa, *pb; HYPRE_Real *ra, *rb; HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn; HYPRE_Real *a_csw, *a_cse, *a_cnw; HYPRE_Real a_cw_offd, a_cw_offdm1, a_cw_offdp1, a_ce_offdm1; HYPRE_Real a_cs_offd, a_cs_offdm1, a_cs_offdp1, a_cn_offd, a_cn_offdm1; HYPRE_Real a_csw_offd, a_csw_offdm1, a_csw_offdp1, a_cse_offd, a_cse_offdm1; HYPRE_Real a_cnw_offd, a_cnw_offdm1; HYPRE_Real *rap_cc, *rap_cw, *rap_cs; HYPRE_Real *rap_csw, *rap_cse; HYPRE_Int iA, iAm1, iAp1; HYPRE_Int iA_offd, iA_offdm1, iA_offdp1; HYPRE_Int iAc; HYPRE_Int iP, iP1; HYPRE_Int iR; HYPRE_Int yOffsetA, yOffsetA_diag, yOffsetA_offd; HYPRE_Int xOffsetP; HYPRE_Int yOffsetP; stridef = cstride; hypre_SetIndex3(stridec, 1, 1, 1); cgrid = hypre_StructMatrixGrid(RAP); cgrid_boxes = hypre_StructGridBoxes(cgrid); constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A); cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci); cstart = hypre_BoxIMin(cgrid_box); hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart); A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi); P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi); R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi); RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci); /*----------------------------------------------------------------- * Extract pointers for interpolation operator: * pa is pointer for weight for f-point above c-point * pb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) - hypre_BoxOffsetDistance(P_dbox, index); /*----------------------------------------------------------------- * Extract pointers for restriction operator: * ra is pointer for weight for f-point above c-point * rb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, 
cdir, index); ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) - hypre_BoxOffsetDistance(R_dbox, index); /*----------------------------------------------------------------- * Extract pointers for 5-point fine grid operator: * * a_cc is pointer for center coefficient * a_cw is pointer for west coefficient * a_ce is pointer for east coefficient * a_cs is pointer for south coefficient * a_cn is pointer for north coefficient *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract additional pointers for 9-point fine grid operator: * * a_csw is pointer for southwest coefficient * a_cse is pointer for southeast coefficient * a_cnw is pointer for northwest coefficient * a_cne is pointer for northeast coefficient *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,-1,-1,0); MapIndex(index_temp, cdir, index); a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,1,0); MapIndex(index_temp, cdir, index); a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract pointers for coarse grid operator - always 9-point: * * We build only the lower triangular part (plus diagonal). * * rap_cc is pointer for center coefficient (etc.) *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,-1,0); MapIndex(index_temp, cdir, index); rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Define offsets for fine grid stencil and interpolation * * In the BoxLoop below I assume iA and iP refer to data associated * with the point which we are building the stencil for. The below * Offsets are used in refering to data associated with other points. 
*-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); yOffsetP = hypre_BoxOffsetDistance(P_dbox,index); if ( constant_coefficient_A == 0 ) { yOffsetA = hypre_BoxOffsetDistance(A_dbox,index); } else { yOffsetA_offd = 0; yOffsetA_diag = 0; } hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); xOffsetP = hypre_BoxOffsetDistance(P_dbox,index); /*-------------------------------------------------------------- * Loop for symmetric 9-point fine grid operator; produces a * symmetric 9-point coarse grid operator. We calculate only the * lower triangular stencil entries: (southwest, south, southeast, * west, and center). *--------------------------------------------------------------*/ hypre_BoxGetSize(cgrid_box, loop_size); if ( constant_coefficient_A == 0 ) { hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size, P_dbox, cstart, stridec, iP, R_dbox, cstart, stridec, iR, A_dbox, fstart, stridef, iA, RAP_dbox, cstart, stridec, iAc); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop4For(iP, iR, iA, iAc) { iAm1 = iA - yOffsetA; iAp1 = iA + yOffsetA; iP1 = iP - yOffsetP - xOffsetP; rap_csw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1] + rb[iR] * a_csw[iAm1] + a_csw[iA] * pa[iP1]; iP1 = iP - yOffsetP; rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1] + rb[iR] * a_cs[iAm1] + a_cs[iA] * pa[iP1]; iP1 = iP - yOffsetP + xOffsetP; rap_cse[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1] + rb[iR] * a_cse[iAm1] + a_cse[iA] * pa[iP1]; iP1 = iP - xOffsetP; rap_cw[iAc] = a_cw[iA] + rb[iR] * a_cw[iAm1] * pb[iP1] + ra[iR] * a_cw[iAp1] * pa[iP1] + rb[iR] * a_cnw[iAm1] + ra[iR] * a_csw[iAp1] + a_csw[iA] * pb[iP1] + a_cnw[iA] * pa[iP1]; rap_cc[iAc] = a_cc[iA] + rb[iR] * a_cc[iAm1] * pb[iP] + ra[iR] * a_cc[iAp1] * pa[iP] + rb[iR] * a_cn[iAm1] + ra[iR] * a_cs[iAp1] + a_cs[iA] * pb[iP] + a_cn[iA] * pa[iP]; } hypre_BoxLoop4End(iP, iR, iA, iAc); } else { iA_offd = 0; iA_offdm1 = iA_offd - yOffsetA_offd; iA_offdp1 = iA_offd + yOffsetA_offd; a_cn_offd = a_cn[iA_offd]; a_cn_offdm1 = a_cn[iA_offdm1]; a_cs_offd = a_cs[iA_offd]; a_cs_offdm1 = a_cs[iA_offdm1]; a_cs_offdp1 = a_cs[iA_offdp1]; a_cw_offd = a_cw[iA_offd]; a_cw_offdp1 = a_cw[iA_offdp1]; a_cw_offdm1 = a_cw[iA_offdm1]; a_ce_offdm1 = a_ce[iA_offdm1]; a_csw_offd = a_csw[iA_offd]; a_csw_offdm1 = a_csw[iA_offdm1]; a_csw_offdp1 = a_csw[iA_offdp1]; a_cse_offd = a_cse[iA_offd]; a_cse_offdm1 = a_cse[iA_offdm1]; a_cnw_offd = a_cnw[iA_offd]; a_cnw_offdm1 = a_cnw[iA_offdm1]; hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size, P_dbox, cstart, stridec, iP, R_dbox, cstart, stridec, iR, A_dbox, fstart, stridef, iA, RAP_dbox, cstart, stridec, iAc); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop4For(iP, iR, iA, iAc) { iAm1 = iA - yOffsetA_diag; iAp1 = iA + yOffsetA_diag; iP1 = iP - yOffsetP - xOffsetP; rap_csw[iAc] = rb[iR] * a_cw_offdm1 * pa[iP1] + rb[iR] * a_csw_offdm1 + a_csw_offd * pa[iP1]; iP1 = iP - yOffsetP; rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1] + rb[iR] * a_cs_offdm1 + a_cs_offd * pa[iP1]; iP1 = iP - yOffsetP + xOffsetP; rap_cse[iAc] = rb[iR] * a_ce_offdm1 * pa[iP1] + rb[iR] * a_cse_offdm1 + a_cse_offd * pa[iP1]; iP1 = iP - xOffsetP; rap_cw[iAc] = a_cw_offd + rb[iR] * a_cw_offdm1 * pb[iP1] + ra[iR] * a_cw_offdp1 * pa[iP1] + rb[iR] * a_cnw_offdm1 + ra[iR] * a_csw_offdp1 + a_csw_offd * pb[iP1] + 
a_cnw_offd * pa[iP1]; rap_cc[iAc] = a_cc[iA] + rb[iR] * a_cc[iAm1] * pb[iP] + ra[iR] * a_cc[iAp1] * pa[iP] + rb[iR] * a_cn_offdm1 + ra[iR] * a_cs_offdp1 + a_cs_offd * pb[iP] + a_cn_offd * pa[iP]; } hypre_BoxLoop4End(iP, iR, iA, iAc); } /* }*/ /* end ForBoxI */ return hypre_error_flag; } /* for fine stencil size 9, constant coefficient 1 */ HYPRE_Int hypre_PFMG2BuildRAPSym_onebox_FSS9_CC1( HYPRE_Int ci, HYPRE_Int fi, hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R, HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride, hypre_StructMatrix *RAP ) { hypre_Index index; hypre_Index index_temp; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_Box *cgrid_box; hypre_IndexRef cstart; hypre_Index fstart; HYPRE_Real *pa, *pb; HYPRE_Real *ra, *rb; HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn; HYPRE_Real *a_csw, *a_cse, *a_cnw; HYPRE_Real *rap_cc, *rap_cw, *rap_cs; HYPRE_Real *rap_csw, *rap_cse; HYPRE_Int iA, iAm1, iAp1; HYPRE_Int iAc; HYPRE_Int iP, iP1; HYPRE_Int iR; HYPRE_Int yOffsetA; HYPRE_Int xOffsetP; HYPRE_Int yOffsetP; cgrid = hypre_StructMatrixGrid(RAP); cgrid_boxes = hypre_StructGridBoxes(cgrid); cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci); cstart = hypre_BoxIMin(cgrid_box); hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart); /*----------------------------------------------------------------- * Extract pointers for interpolation operator: * pa is pointer for weight for f-point above c-point * pb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index); /*----------------------------------------------------------------- * Extract pointers for restriction operator: * ra is pointer for weight for f-point above c-point * rb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index); /*----------------------------------------------------------------- * Extract pointers for 5-point fine grid operator: * * a_cc is pointer for center coefficient * a_cw is pointer for west coefficient * a_ce is pointer for east coefficient * a_cs is pointer for south coefficient * a_cn is pointer for north coefficient *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract additional pointers for 9-point fine 
grid operator: * * a_csw is pointer for southwest coefficient * a_cse is pointer for southeast coefficient * a_cnw is pointer for northwest coefficient * a_cne is pointer for northeast coefficient *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,-1,-1,0); MapIndex(index_temp, cdir, index); a_csw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,1,0); MapIndex(index_temp, cdir, index); a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract pointers for coarse grid operator - always 9-point: * * We build only the lower triangular part (plus diagonal). * * rap_cc is pointer for center coefficient (etc.) *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,-1,0); MapIndex(index_temp, cdir, index); rap_csw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); rap_cse = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Define offsets for fine grid stencil and interpolation * * In the BoxLoop below I assume iA and iP refer to data associated * with the point which we are building the stencil for. The below * Offsets are used in refering to data associated with other points. *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); yOffsetA = 0; yOffsetP = 0; hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); xOffsetP = 0; /*----------------------------------------------------------------- * Switch statement to direct control to apropriate BoxLoop depending * on stencil size. Default is full 9-point. *-----------------------------------------------------------------*/ /*-------------------------------------------------------------- * Loop for symmetric 9-point fine grid operator; produces a * symmetric 9-point coarse grid operator. We calculate only the * lower triangular stencil entries: (southwest, south, southeast, * west, and center). 
*--------------------------------------------------------------*/ iP = 0; iR = 0; iA = 0; iAc = 0; iAm1 = iA - yOffsetA; iAp1 = iA + yOffsetA; iP1 = iP - yOffsetP - xOffsetP; rap_csw[iAc] = rb[iR] * a_cw[iAm1] * pa[iP1] + rb[iR] * a_csw[iAm1] + a_csw[iA] * pa[iP1]; iP1 = iP - yOffsetP; rap_cs[iAc] = rb[iR] * a_cc[iAm1] * pa[iP1] + rb[iR] * a_cs[iAm1] + a_cs[iA] * pa[iP1]; iP1 = iP - yOffsetP + xOffsetP; rap_cse[iAc] = rb[iR] * a_ce[iAm1] * pa[iP1] + rb[iR] * a_cse[iAm1] + a_cse[iA] * pa[iP1]; iP1 = iP - xOffsetP; rap_cw[iAc] = a_cw[iA] + rb[iR] * a_cw[iAm1] * pb[iP1] + ra[iR] * a_cw[iAp1] * pa[iP1] + rb[iR] * a_cnw[iAm1] + ra[iR] * a_csw[iAp1] + a_csw[iA] * pb[iP1] + a_cnw[iA] * pa[iP1]; rap_cc[iAc] = a_cc[iA] + rb[iR] * a_cc[iAm1] * pb[iP] + ra[iR] * a_cc[iAp1] * pa[iP] + rb[iR] * a_cn[iAm1] + ra[iR] * a_cs[iAp1] + a_cs[iA] * pb[iP] + a_cn[iA] * pa[iP]; /* }*/ /* end ForBoxI */ return hypre_error_flag; } /*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ HYPRE_Int hypre_PFMG2BuildRAPNoSym( hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R, HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride, hypre_StructMatrix *RAP ) { hypre_StructStencil *fine_stencil; HYPRE_Int fine_stencil_size; hypre_StructGrid *fgrid; HYPRE_Int *fgrid_ids; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; HYPRE_Int *cgrid_ids; HYPRE_Int fi, ci; HYPRE_Int constant_coefficient; fine_stencil = hypre_StructMatrixStencil(A); fine_stencil_size = hypre_StructStencilSize(fine_stencil); fgrid = hypre_StructMatrixGrid(A); fgrid_ids = hypre_StructGridIDs(fgrid); cgrid = hypre_StructMatrixGrid(RAP); cgrid_boxes = hypre_StructGridBoxes(cgrid); cgrid_ids = hypre_StructGridIDs(cgrid); constant_coefficient = hypre_StructMatrixConstantCoefficient(RAP); if (constant_coefficient) { hypre_assert( hypre_StructMatrixConstantCoefficient(R) ); hypre_assert( hypre_StructMatrixConstantCoefficient(A) ); hypre_assert( hypre_StructMatrixConstantCoefficient(P) ); } else { /* hypre_assert( hypre_StructMatrixConstantCoefficient(R)==0 ); hypre_assert( hypre_StructMatrixConstantCoefficient(A)==0 ); hypre_assert( hypre_StructMatrixConstantCoefficient(P)==0 ); */ } fi = 0; hypre_ForBoxI(ci, cgrid_boxes) { while (fgrid_ids[fi] != cgrid_ids[ci]) { fi++; } /*----------------------------------------------------------------- * Switch statement to direct control to appropriate BoxLoop depending * on stencil size. Default is full 27-point. *-----------------------------------------------------------------*/ switch (fine_stencil_size) { /*-------------------------------------------------------------- * Loop for 5-point fine grid operator; produces upper triangular * part of 9-point coarse grid operator - excludes diagonal. * stencil entries: (northeast, north, northwest, and east) *--------------------------------------------------------------*/ case 5: if ( constant_coefficient==1 ) { hypre_PFMG2BuildRAPNoSym_onebox_FSS5_CC1( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } else { hypre_PFMG2BuildRAPNoSym_onebox_FSS5_CC0( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } break; /*-------------------------------------------------------------- * Loop for 9-point fine grid operator; produces upper triangular * part of 9-point coarse grid operator - excludes diagonal. 
* stencil entries: (northeast, north, northwest, and east) *--------------------------------------------------------------*/ default: if ( constant_coefficient==1 ) { hypre_PFMG2BuildRAPNoSym_onebox_FSS9_CC1( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } else { hypre_PFMG2BuildRAPNoSym_onebox_FSS9_CC0( ci, fi, A, P, R, cdir, cindex, cstride, RAP ); } break; } /* end switch statement */ } /* end ForBoxI */ return hypre_error_flag; } /* for fine stencil size 5, constant coefficient 0 */ HYPRE_Int hypre_PFMG2BuildRAPNoSym_onebox_FSS5_CC0( HYPRE_Int ci, HYPRE_Int fi, hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R, HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride, hypre_StructMatrix *RAP ) { hypre_Index index; hypre_Index index_temp; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_Box *cgrid_box; hypre_IndexRef cstart; hypre_Index stridec; hypre_Index fstart; hypre_IndexRef stridef; hypre_Index loop_size; HYPRE_Int constant_coefficient_A; hypre_Box *A_dbox; hypre_Box *P_dbox; hypre_Box *R_dbox; hypre_Box *RAP_dbox; HYPRE_Real *pa, *pb; HYPRE_Real *ra, *rb; HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cn; HYPRE_Real a_cn_offd, a_cn_offdp1, a_cw_offdp1; HYPRE_Real a_ce_offd, a_ce_offdm1, a_ce_offdp1; HYPRE_Real *rap_ce, *rap_cn; HYPRE_Real *rap_cnw, *rap_cne; HYPRE_Int iA, iAm1, iAp1, iA_offd, iA_offdm1, iA_offdp1; HYPRE_Int iAc; HYPRE_Int iP, iP1; HYPRE_Int iR; HYPRE_Int yOffsetA, yOffsetA_diag, yOffsetA_offd; HYPRE_Int xOffsetP; HYPRE_Int yOffsetP; /*hypre_printf("nosym 5.0\n");*/ stridef = cstride; hypre_SetIndex3(stridec, 1, 1, 1); cgrid = hypre_StructMatrixGrid(RAP); cgrid_boxes = hypre_StructGridBoxes(cgrid); constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A); /* fi = 0; hypre_ForBoxI(ci, cgrid_boxes) { while (fgrid_ids[fi] != cgrid_ids[ci]) { fi++; } */ cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci); cstart = hypre_BoxIMin(cgrid_box); hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart); A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi); P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi); R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi); RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci); /*----------------------------------------------------------------- * Extract pointers for interpolation operator: * pa is pointer for weight for f-point above c-point * pb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) - hypre_BoxOffsetDistance(P_dbox, index); /*----------------------------------------------------------------- * Extract pointers for restriction operator: * ra is pointer for weight for f-point above c-point * rb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) - hypre_BoxOffsetDistance(R_dbox, index); /*----------------------------------------------------------------- * Extract pointers for 5-point fine grid operator: * * a_cc is 
pointer for center coefficient * a_cw is pointer for west coefficient * a_ce is pointer for east coefficient * a_cs is pointer for south coefficient * a_cn is pointer for north coefficient *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract pointers for coarse grid operator - always 9-point: * * We build only the upper triangular part. * * rap_ce is pointer for east coefficient (etc.) *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,1,0); MapIndex(index_temp, cdir, index); rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,1,0); MapIndex(index_temp, cdir, index); rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Define offsets for fine grid stencil and interpolation * * In the BoxLoop below I assume iA and iP refer to data associated * with the point which we are building the stencil for. The below * Offsets are used in refering to data associated with other points. *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); yOffsetP = hypre_BoxOffsetDistance(P_dbox,index); if ( constant_coefficient_A == 0 ) { yOffsetA = hypre_BoxOffsetDistance(A_dbox,index); } else { hypre_assert( constant_coefficient_A==2 ); yOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index); yOffsetA_offd = 0; } hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); xOffsetP = hypre_BoxOffsetDistance(P_dbox,index); /*-------------------------------------------------------------- * Loop for 5-point fine grid operator; produces upper triangular * part of 9-point coarse grid operator - excludes diagonal. 
* stencil entries: (northeast, north, northwest, and east) *--------------------------------------------------------------*/ hypre_BoxGetSize(cgrid_box, loop_size); if ( constant_coefficient_A == 0 ) { /*hypre_printf("nosym 5.0.0\n");*/ hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size, P_dbox, cstart, stridec, iP, R_dbox, cstart, stridec, iR, A_dbox, fstart, stridef, iA, RAP_dbox, cstart, stridec, iAc); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop4For(iP, iR, iA, iAc) { iAm1 = iA - yOffsetA; iAp1 = iA + yOffsetA; iP1 = iP + yOffsetP + xOffsetP; rap_cne[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]; iP1 = iP + yOffsetP; rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1] + ra[iR] * a_cn[iAp1] + a_cn[iA] * pb[iP1]; iP1 = iP + yOffsetP - xOffsetP; rap_cnw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]; iP1 = iP + xOffsetP; rap_ce[iAc] = a_ce[iA] + rb[iR] * a_ce[iAm1] * pb[iP1] + ra[iR] * a_ce[iAp1] * pa[iP1]; } hypre_BoxLoop4End(iP, iR, iA, iAc); } else { hypre_assert( constant_coefficient_A==2 ); /*hypre_printf("nosym 5.0.2\n"); */ iA_offd = 0; iA_offdm1 = iA_offd - yOffsetA_offd; iA_offdp1 = iA_offd + yOffsetA_offd; a_cn_offd = a_cn[iA_offd]; a_cn_offdp1 = a_cn[iA_offdp1]; a_cw_offdp1 = a_cw[iA_offdp1]; a_ce_offd = a_ce[iA_offd]; a_ce_offdm1 = a_ce[iA_offdm1]; a_ce_offdp1 = a_ce[iA_offdp1]; hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size, P_dbox, cstart, stridec, iP, R_dbox, cstart, stridec, iR, A_dbox, fstart, stridef, iA, RAP_dbox, cstart, stridec, iAc); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAp1,iP1) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop4For(iP, iR, iA, iAc) { iAp1 = iA + yOffsetA_diag; iP1 = iP + yOffsetP + xOffsetP; rap_cne[iAc] = ra[iR] * a_ce_offdp1 * pb[iP1]; iP1 = iP + yOffsetP; rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1] + ra[iR] * a_cn_offdp1 + a_cn_offd * pb[iP1]; iP1 = iP + yOffsetP - xOffsetP; rap_cnw[iAc] = ra[iR] * a_cw_offdp1 * pb[iP1]; iP1 = iP + xOffsetP; rap_ce[iAc] = a_ce_offd + rb[iR] * a_ce_offdm1 * pb[iP1] + ra[iR] * a_ce_offdp1 * pa[iP1]; } hypre_BoxLoop4End(iP, iR, iA, iAc); } /* }*/ /* end ForBoxI */ return hypre_error_flag; } /* for fine stencil size 5, constant coefficient 1 */ HYPRE_Int hypre_PFMG2BuildRAPNoSym_onebox_FSS5_CC1( HYPRE_Int ci, HYPRE_Int fi, hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R, HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride, hypre_StructMatrix *RAP ) { hypre_Index index; hypre_Index index_temp; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_Box *cgrid_box; hypre_IndexRef cstart; hypre_Index fstart; HYPRE_Real *pa, *pb; HYPRE_Real *ra, *rb; HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cn; HYPRE_Real *rap_ce, *rap_cn; HYPRE_Real *rap_cnw, *rap_cne; HYPRE_Int iA, iAm1, iAp1; HYPRE_Int iAc; HYPRE_Int iP, iP1; HYPRE_Int iR; HYPRE_Int yOffsetA; HYPRE_Int xOffsetP; HYPRE_Int yOffsetP; /* hypre_printf("nosym 5.1\n");*/ cgrid = hypre_StructMatrixGrid(RAP); cgrid_boxes = hypre_StructGridBoxes(cgrid); cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci); cstart = hypre_BoxIMin(cgrid_box); hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart); /*----------------------------------------------------------------- * Extract pointers for interpolation operator: * pa is pointer for weight for f-point above c-point * pb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,-1,0); 
MapIndex(index_temp, cdir, index); pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index); /*----------------------------------------------------------------- * Extract pointers for restriction operator: * ra is pointer for weight for f-point above c-point * rb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index); /*----------------------------------------------------------------- * Extract pointers for 5-point fine grid operator: * * a_cc is pointer for center coefficient * a_cw is pointer for west coefficient * a_ce is pointer for east coefficient * a_cs is pointer for south coefficient * a_cn is pointer for north coefficient *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract pointers for coarse grid operator - always 9-point: * * We build only the upper triangular part. * * rap_ce is pointer for east coefficient (etc.) *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,1,0); MapIndex(index_temp, cdir, index); rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,1,0); MapIndex(index_temp, cdir, index); rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Define offsets for fine grid stencil and interpolation * * In the BoxLoop below I assume iA and iP refer to data associated * with the point which we are building the stencil for. The below * Offsets are used in refering to data associated with other points. *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); yOffsetA = 0; yOffsetP = 0; hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); xOffsetP = 0; /*----------------------------------------------------------------- * Switch statement to direct control to appropriate BoxLoop depending * on stencil size. Default is full 27-point. *-----------------------------------------------------------------*/ /*-------------------------------------------------------------- * Loop for 5-point fine grid operator; produces upper triangular * part of 9-point coarse grid operator - excludes diagonal. 
* stencil entries: (northeast, north, northwest, and east) *--------------------------------------------------------------*/ iP = 0; iR = 0; iA = 0; iAc = 0; iAm1 = iA - yOffsetA; iAp1 = iA + yOffsetA; iP1 = iP + yOffsetP + xOffsetP; rap_cne[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1]; iP1 = iP + yOffsetP; rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1] + ra[iR] * a_cn[iAp1] + a_cn[iA] * pb[iP1]; iP1 = iP + yOffsetP - xOffsetP; rap_cnw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1]; iP1 = iP + xOffsetP; rap_ce[iAc] = a_ce[iA] + rb[iR] * a_ce[iAm1] * pb[iP1] + ra[iR] * a_ce[iAp1] * pa[iP1]; /* }*/ /* end ForBoxI */ return hypre_error_flag; } /* for fine stencil size 9, constant coefficient 0 */ HYPRE_Int hypre_PFMG2BuildRAPNoSym_onebox_FSS9_CC0( HYPRE_Int ci, HYPRE_Int fi, hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R, HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride, hypre_StructMatrix *RAP ) { hypre_Index index; hypre_Index index_temp; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_Box *cgrid_box; hypre_IndexRef cstart; hypre_Index stridec; hypre_Index fstart; hypre_IndexRef stridef; hypre_Index loop_size; HYPRE_Int constant_coefficient_A; hypre_Box *A_dbox; hypre_Box *P_dbox; hypre_Box *R_dbox; hypre_Box *RAP_dbox; HYPRE_Real *pa, *pb; HYPRE_Real *ra, *rb; HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cn; HYPRE_Real *a_cse, *a_cnw, *a_cne; HYPRE_Real a_cn_offd, a_cn_offdp1, a_cw_offdp1; HYPRE_Real a_ce_offd, a_ce_offdm1, a_ce_offdp1; HYPRE_Real a_cne_offd, a_cne_offdm1, a_cne_offdp1; HYPRE_Real a_cse_offd, a_cse_offdp1, a_cnw_offd, a_cnw_offdp1; HYPRE_Real *rap_ce, *rap_cn; HYPRE_Real *rap_cnw, *rap_cne; HYPRE_Int iA, iAm1, iAp1, iA_offd, iA_offdm1, iA_offdp1; HYPRE_Int iAc; HYPRE_Int iP, iP1; HYPRE_Int iR; HYPRE_Int yOffsetA, yOffsetA_diag, yOffsetA_offd; HYPRE_Int xOffsetP; HYPRE_Int yOffsetP; /*hypre_printf("nosym 9.0\n");*/ stridef = cstride; hypre_SetIndex3(stridec, 1, 1, 1); cgrid = hypre_StructMatrixGrid(RAP); cgrid_boxes = hypre_StructGridBoxes(cgrid); constant_coefficient_A = hypre_StructMatrixConstantCoefficient(A); /* fi = 0; hypre_ForBoxI(ci, cgrid_boxes) { while (fgrid_ids[fi] != cgrid_ids[ci]) { fi++; } */ cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci); cstart = hypre_BoxIMin(cgrid_box); hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart); A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi); P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi); R_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(R), fi); RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci); /*----------------------------------------------------------------- * Extract pointers for interpolation operator: * pa is pointer for weight for f-point above c-point * pb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) - hypre_BoxOffsetDistance(P_dbox, index); /*----------------------------------------------------------------- * Extract pointers for restriction operator: * ra is pointer for weight for f-point above c-point * rb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); ra = 
hypre_StructMatrixExtractPointerByIndex(R, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index) - hypre_BoxOffsetDistance(R_dbox, index); /*----------------------------------------------------------------- * Extract pointers for 5-point fine grid operator: * * a_cc is pointer for center coefficient * a_cw is pointer for west coefficient * a_ce is pointer for east coefficient * a_cs is pointer for south coefficient * a_cn is pointer for north coefficient *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract additional pointers for 9-point fine grid operator: * * a_csw is pointer for southwest coefficient * a_cse is pointer for southeast coefficient * a_cnw is pointer for northwest coefficient * a_cne is pointer for northeast coefficient *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,1,0); MapIndex(index_temp, cdir, index); a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,1,0); MapIndex(index_temp, cdir, index); a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract pointers for coarse grid operator - always 9-point: * * We build only the upper triangular part. * * rap_ce is pointer for east coefficient (etc.) *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,1,0); MapIndex(index_temp, cdir, index); rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,1,0); MapIndex(index_temp, cdir, index); rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Define offsets for fine grid stencil and interpolation * * In the BoxLoop below I assume iA and iP refer to data associated * with the point which we are building the stencil for. The below * Offsets are used in refering to data associated with other points. 
*-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); yOffsetP = hypre_BoxOffsetDistance(P_dbox,index); if ( constant_coefficient_A == 0 ) { yOffsetA = hypre_BoxOffsetDistance(A_dbox,index); } else { hypre_assert( constant_coefficient_A==2 ); yOffsetA_diag = hypre_BoxOffsetDistance(A_dbox,index); yOffsetA_offd = 0; } hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); xOffsetP = hypre_BoxOffsetDistance(P_dbox,index); /*----------------------------------------------------------------- * Switch statement to direct control to appropriate BoxLoop depending * on stencil size. Default is full 27-point. *-----------------------------------------------------------------*/ /*-------------------------------------------------------------- * Loop for 9-point fine grid operator; produces upper triangular * part of 9-point coarse grid operator - excludes diagonal. * stencil entries: (northeast, north, northwest, and east) *--------------------------------------------------------------*/ hypre_BoxGetSize(cgrid_box, loop_size); if ( constant_coefficient_A==0 ) { /*hypre_printf("nosym 9.0.0\n");*/ hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size, P_dbox, cstart, stridec, iP, R_dbox, cstart, stridec, iR, A_dbox, fstart, stridef, iA, RAP_dbox, cstart, stridec, iAc); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop4For(iP, iR, iA, iAc) { iAm1 = iA - yOffsetA; iAp1 = iA + yOffsetA; iP1 = iP + yOffsetP + xOffsetP; rap_cne[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1] + ra[iR] * a_cne[iAp1] + a_cne[iA] * pb[iP1]; iP1 = iP + yOffsetP; rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1] + ra[iR] * a_cn[iAp1] + a_cn[iA] * pb[iP1]; iP1 = iP + yOffsetP - xOffsetP; rap_cnw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1] + ra[iR] * a_cnw[iAp1] + a_cnw[iA] * pb[iP1]; iP1 = iP + xOffsetP; rap_ce[iAc] = a_ce[iA] + rb[iR] * a_ce[iAm1] * pb[iP1] + ra[iR] * a_ce[iAp1] * pa[iP1] + rb[iR] * a_cne[iAm1] + ra[iR] * a_cse[iAp1] + a_cse[iA] * pb[iP1] + a_cne[iA] * pa[iP1]; } hypre_BoxLoop4End(iP, iR, iA, iAc); } else { /*hypre_printf("nosym 9.0.2\n");*/ hypre_assert( constant_coefficient_A==2 ); iA_offd = 0; iA_offdm1 = iA_offd - yOffsetA_offd; iA_offdp1 = iA_offd + yOffsetA_offd; a_cn_offd = a_cn[iA_offd]; a_cn_offdp1 = a_cn[iA_offdp1]; a_cw_offdp1 = a_cw[iA_offdp1]; a_ce_offd = a_ce[iA_offd]; a_ce_offdm1 = a_ce[iA_offdm1]; a_ce_offdp1 = a_ce[iA_offdp1]; a_cne_offd = a_cne[iA_offd]; a_cne_offdm1 = a_cne[iA_offdm1]; a_cne_offdp1 = a_cne[iA_offdp1]; a_cse_offd = a_cse[iA_offd]; a_cse_offdp1 = a_cse[iA_offdp1]; a_cnw_offd = a_cnw[iA_offd]; a_cnw_offdp1 = a_cnw[iA_offdp1]; hypre_BoxLoop4Begin(hypre_StructMatrixNDim(A), loop_size, P_dbox, cstart, stridec, iP, R_dbox, cstart, stridec, iR, A_dbox, fstart, stridef, iA, RAP_dbox, cstart, stridec, iAc); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iR,iA,iAc,iAm1,iAp1,iP1) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop4For(iP, iR, iA, iAc) { iAm1 = iA - yOffsetA_diag; iAp1 = iA + yOffsetA_diag; iP1 = iP + yOffsetP + xOffsetP; rap_cne[iAc] = ra[iR] * a_ce_offdp1 * pb[iP1] + ra[iR] * a_cne_offdp1 + a_cne_offd * pb[iP1]; iP1 = iP + yOffsetP; rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1] + ra[iR] * a_cn_offdp1 + a_cn_offd * pb[iP1]; iP1 = iP + yOffsetP - xOffsetP; rap_cnw[iAc] = ra[iR] * a_cw_offdp1 * pb[iP1] + ra[iR] * a_cnw_offdp1 + a_cnw_offd * pb[iP1]; iP1 = iP + xOffsetP; rap_ce[iAc] = 
a_ce_offd + rb[iR] * a_ce_offdm1 * pb[iP1] + ra[iR] * a_ce_offdp1 * pa[iP1] + rb[iR] * a_cne_offdm1 + ra[iR] * a_cse_offdp1 + a_cse_offd * pb[iP1] + a_cne_offd * pa[iP1]; } hypre_BoxLoop4End(iP, iR, iA, iAc); } /* }*/ /* end ForBoxI */ return hypre_error_flag; } /* for fine stencil size 9, constant coefficient 1 */ HYPRE_Int hypre_PFMG2BuildRAPNoSym_onebox_FSS9_CC1( HYPRE_Int ci, HYPRE_Int fi, hypre_StructMatrix *A, hypre_StructMatrix *P, hypre_StructMatrix *R, HYPRE_Int cdir, hypre_Index cindex, hypre_Index cstride, hypre_StructMatrix *RAP ) { hypre_Index index; hypre_Index index_temp; hypre_StructGrid *cgrid; hypre_BoxArray *cgrid_boxes; hypre_Box *cgrid_box; hypre_IndexRef cstart; hypre_Index fstart; HYPRE_Real *pa, *pb; HYPRE_Real *ra, *rb; HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cn; HYPRE_Real *a_cse, *a_cnw, *a_cne; HYPRE_Real *rap_ce, *rap_cn; HYPRE_Real *rap_cnw, *rap_cne; HYPRE_Int iA, iAm1, iAp1; HYPRE_Int iAc; HYPRE_Int iP, iP1; HYPRE_Int iR; HYPRE_Int yOffsetA; HYPRE_Int xOffsetP; HYPRE_Int yOffsetP; /*hypre_printf("nosym 9.1\n");*/ cgrid = hypre_StructMatrixGrid(RAP); cgrid_boxes = hypre_StructGridBoxes(cgrid); cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci); cstart = hypre_BoxIMin(cgrid_box); hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart); /*----------------------------------------------------------------- * Extract pointers for interpolation operator: * pa is pointer for weight for f-point above c-point * pb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index); /*----------------------------------------------------------------- * Extract pointers for restriction operator: * ra is pointer for weight for f-point above c-point * rb is pointer for weight for f-point below c-point *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,-1,0); MapIndex(index_temp, cdir, index); ra = hypre_StructMatrixExtractPointerByIndex(R, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); rb = hypre_StructMatrixExtractPointerByIndex(R, fi, index); /*----------------------------------------------------------------- * Extract pointers for 5-point fine grid operator: * * a_cc is pointer for center coefficient * a_cw is pointer for west coefficient * a_ce is pointer for east coefficient * a_cs is pointer for south coefficient * a_cn is pointer for north coefficient *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,0,0); MapIndex(index_temp, cdir, index); a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,0,0); MapIndex(index_temp, cdir, index); a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract additional pointers for 9-point fine grid operator: * * a_csw is pointer for southwest coefficient * a_cse is pointer for southeast coefficient * a_cnw is pointer for northwest 
coefficient * a_cne is pointer for northeast coefficient *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,1,-1,0); MapIndex(index_temp, cdir, index); a_cse = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,-1,1,0); MapIndex(index_temp, cdir, index); a_cnw = hypre_StructMatrixExtractPointerByIndex(A, fi, index); hypre_SetIndex3(index_temp,1,1,0); MapIndex(index_temp, cdir, index); a_cne = hypre_StructMatrixExtractPointerByIndex(A, fi, index); /*----------------------------------------------------------------- * Extract pointers for coarse grid operator - always 9-point: * * We build only the upper triangular part. * * rap_ce is pointer for east coefficient (etc.) *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,1,1,0); MapIndex(index_temp, cdir, index); rap_cne = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); hypre_SetIndex3(index_temp,-1,1,0); MapIndex(index_temp, cdir, index); rap_cnw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index); /*----------------------------------------------------------------- * Define offsets for fine grid stencil and interpolation * * In the BoxLoop below I assume iA and iP refer to data associated * with the point which we are building the stencil for. The below * Offsets are used in refering to data associated with other points. *-----------------------------------------------------------------*/ hypre_SetIndex3(index_temp,0,1,0); MapIndex(index_temp, cdir, index); yOffsetA = 0; yOffsetP = 0; hypre_SetIndex3(index_temp,1,0,0); MapIndex(index_temp, cdir, index); xOffsetP = 0; /*----------------------------------------------------------------- * Switch statement to direct control to appropriate BoxLoop depending * on stencil size. Default is full 27-point. *-----------------------------------------------------------------*/ /*-------------------------------------------------------------- * Loop for 9-point fine grid operator; produces upper triangular * part of 9-point coarse grid operator - excludes diagonal. * stencil entries: (northeast, north, northwest, and east) *--------------------------------------------------------------*/ iP = 0; iR = 0; iA = 0; iAc = 0; iAm1 = iA - yOffsetA; iAp1 = iA + yOffsetA; iP1 = iP + yOffsetP + xOffsetP; rap_cne[iAc] = ra[iR] * a_ce[iAp1] * pb[iP1] + ra[iR] * a_cne[iAp1] + a_cne[iA] * pb[iP1]; iP1 = iP + yOffsetP; rap_cn[iAc] = ra[iR] * a_cc[iAp1] * pb[iP1] + ra[iR] * a_cn[iAp1] + a_cn[iA] * pb[iP1]; iP1 = iP + yOffsetP - xOffsetP; rap_cnw[iAc] = ra[iR] * a_cw[iAp1] * pb[iP1] + ra[iR] * a_cnw[iAp1] + a_cnw[iA] * pb[iP1]; iP1 = iP + xOffsetP; rap_ce[iAc] = a_ce[iA] + rb[iR] * a_ce[iAm1] * pb[iP1] + ra[iR] * a_ce[iAp1] * pa[iP1] + rb[iR] * a_cne[iAm1] + ra[iR] * a_cse[iAp1] + a_cse[iA] * pb[iP1] + a_cne[iA] * pa[iP1]; /* }*/ /* end ForBoxI */ return hypre_error_flag; }
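The routines above assemble the coarse-grid operator entry by entry as the Galerkin triple product RAP = R·A·P, with BuildRAPSym producing the lower-triangular stencil entries and BuildRAPNoSym the upper-triangular ones. The standalone sketch below is not part of hypre; it is a minimal 1-D illustration of the same idea, assuming full coarsening, a 3-point fine-grid operator, linear interpolation P, and R = P^T, with the matrices formed densely instead of stencil entry by stencil entry as the structured code does.

/* Illustrative sketch only (not hypre code): dense 1-D Galerkin product
 * RAP = R * A * P, assuming full coarsening (C-points at even fine indices),
 * a 3-point fine operator, linear interpolation, and R = P^T. */
#include <stdio.h>

#define NF 9   /* number of fine points (odd, so C-points are 0,2,4,...) */
#define NC 5   /* number of coarse points */

int main(void)
{
   double A[NF][NF] = {{0}}, P[NF][NC] = {{0}}, RAP[NC][NC] = {{0}};
   int i, j, k, l;

   /* 3-point fine-grid operator: -1  2  -1 */
   for (i = 0; i < NF; i++)
   {
      A[i][i] = 2.0;
      if (i > 0)      A[i][i-1] = -1.0;
      if (i < NF - 1) A[i][i+1] = -1.0;
   }

   /* Linear interpolation: C-points injected, F-points average their neighbors */
   for (j = 0; j < NC; j++)  P[2*j][j] = 1.0;
   for (i = 1; i < NF; i += 2) { P[i][(i-1)/2] = 0.5; P[i][(i+1)/2] = 0.5; }

   /* RAP(i,j) = sum_k sum_l P(k,i) * A(k,l) * P(l,j), since R = P^T */
   for (i = 0; i < NC; i++)
      for (j = 0; j < NC; j++)
         for (k = 0; k < NF; k++)
            for (l = 0; l < NF; l++)
               RAP[i][j] += P[k][i] * A[k][l] * P[l][j];

   /* Interior rows come out as the expected 3-point coarse stencil (-0.5, 1, -0.5). */
   for (i = 0; i < NC; i++)
   {
      for (j = 0; j < NC; j++) printf("%6.2f ", RAP[i][j]);
      printf("\n");
   }
   return 0;
}

The 2-D routines above perform the same contraction, but exploit the known stencil structure of A, P, and R so only a handful of coefficient products per coarse point are needed, and they special-case constant-coefficient matrices by hoisting the coefficient loads out of the BoxLoop.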
projector.c
#include "projector.h" #include "linalg.h" #include "quadrature.h" #include "radial.h" #include "sbt.h" #include "utils.h" #include <complex.h> #include <math.h> #include <mkl.h> #include <mkl_types.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #define c 0.262465831 #define PI 3.14159265358979323846 #define DENSE_GRID_SCALE 1 ppot_t *get_projector_list(int num_els, int *labels, int *ls, double *wave_grids, double *projectors, double *aewaves, double *pswaves, double *rmaxs, double grid_encut) { setbuf(stdout, NULL); ppot_t *pps = (ppot_t *)malloc(num_els * sizeof(ppot_t)); CHECK_ALLOCATION(pps); int wt = 0; int pt = 0; int wgt = 0; int pgt = 0; int l_num = 0; for (int i = 0; i < num_els; i++) { pps[i].num_projs = labels[4 * i + 1]; pps[i].rmax = rmaxs[i]; pps[i].proj_gridsize = labels[4 * i + 2]; pps[i].wave_gridsize = labels[4 * i + 3]; pps[i].total_projs = 0; pps[i].wave_grid = (double *)malloc((pps[i].wave_gridsize) * sizeof(double)); pps[i].kwave_grid = (double *)malloc((pps[i].wave_gridsize) * sizeof(double)); CHECK_ALLOCATION(pps[i].wave_grid); CHECK_ALLOCATION(pps[i].kwave_grid); pps[i].lmax = 0; pps[i].pspw_overlap_matrix = NULL; pps[i].aepw_overlap_matrix = NULL; pps[i].diff_overlap_matrix = NULL; for (int j = 0; j < pps[i].wave_gridsize; j++) { pps[i].wave_grid[j] = wave_grids[wgt]; wgt++; } pps[i].proj_grid = (double *)malloc(pps[i].proj_gridsize * sizeof(double)); CHECK_ALLOCATION(pps[i].proj_grid); for (int j = 0; j < pps[i].proj_gridsize; j++) { pps[i].proj_grid[j] = pps[i].rmax / pps[i].proj_gridsize * j; pgt++; } funcset_t *funcs = (funcset_t *)malloc(pps[i].num_projs * sizeof(funcset_t)); CHECK_ALLOCATION(funcs); double *dense_wavegrid = (double *)malloc( DENSE_GRID_SCALE * pps[i].wave_gridsize * sizeof(double)); CHECK_ALLOCATION(dense_wavegrid); dense_wavegrid[0] = pps[i].wave_grid[0]; double factor = pow(pps[i].wave_grid[1] / pps[i].wave_grid[0], 1.0 / DENSE_GRID_SCALE); for (int p = 1; p < pps[i].wave_gridsize * DENSE_GRID_SCALE; p++) { dense_wavegrid[p] = dense_wavegrid[p - 1] * factor; } pps[i].wave_rmax = pps[i].wave_grid[pps[i].wave_gridsize - 1]; pps[i].smooth_grid = (double *)malloc(pps[i].proj_gridsize * sizeof(double)); for (int j = 0; j < pps[i].proj_gridsize; j++) { pps[i].smooth_grid[j] = pps[i].wave_rmax / pps[i].proj_gridsize * j; } double *dense_kwavegrid = (double *)malloc( DENSE_GRID_SCALE * pps[i].wave_gridsize * sizeof(double)); CHECK_ALLOCATION(dense_kwavegrid); for (int k = 0; k < pps[i].num_projs; k++) { funcs[k].proj = (double *)malloc(sizeof(double) * pps[i].proj_gridsize); funcs[k].aewave = (double *)malloc(sizeof(double) * pps[i].wave_gridsize); funcs[k].pswave = (double *)malloc(sizeof(double) * pps[i].wave_gridsize); funcs[k].diffwave = (double *)malloc(sizeof(double) * pps[i].wave_gridsize); CHECK_ALLOCATION(funcs[k].proj); CHECK_ALLOCATION(funcs[k].aewave); CHECK_ALLOCATION(funcs[k].pswave); CHECK_ALLOCATION(funcs[k].diffwave); funcs[k].l = ls[l_num]; if (funcs[k].l > pps[i].lmax) pps[i].lmax = funcs[k].l; pps[i].total_projs += 2 * ls[l_num] + 1; l_num++; for (int j = 0; j < pps[i].wave_gridsize; j++) { funcs[k].aewave[j] = aewaves[wt]; funcs[k].pswave[j] = pswaves[wt]; funcs[k].diffwave[j] = aewaves[wt] - pswaves[wt]; wt++; } for (int j = 0; j < pps[i].proj_gridsize; j++) { funcs[k].proj[j] = projectors[pt]; pt++; } funcs[k].proj_spline = spline_coeff(pps[i].proj_grid, funcs[k].proj, pps[i].proj_gridsize); funcs[k].aewave_spline = spline_coeff(pps[i].wave_grid, funcs[k].aewave, pps[i].wave_gridsize); 
funcs[k].pswave_spline = spline_coeff(pps[i].wave_grid, funcs[k].pswave, pps[i].wave_gridsize); funcs[k].diffwave_spline = spline_coeff( pps[i].wave_grid, funcs[k].diffwave, pps[i].wave_gridsize); } sbt_descriptor_t *d = spherical_bessel_transform_setup( 1e7, 0, pps[i].lmax, pps[i].wave_gridsize, pps[i].wave_grid, pps[i].kwave_grid); for (int k = 0; k < pps[i].num_projs; k++) { funcs[k].kwave = wave_spherical_bessel_transform(d, funcs[k].diffwave, funcs[k].l); // funcs[k].kwave = besselt(pps[i].wave_grid, pps[i].kwave_grid, // funcs[k].diffwave, 520.0, pps[i].wave_gridsize, funcs[k].l); funcs[k].kwave_spline = spline_coeff(pps[i].kwave_grid, funcs[k].kwave, pps[i].wave_gridsize); } for (int k = 0; k < pps[i].num_projs; k++) { // double* dense_kwave = wave_spherical_bessel_transform(d2, // funcs[k].smooth_diffwave, funcs[k].l); double *dense_kwave = (double *)calloc( pps[i].wave_gridsize * DENSE_GRID_SCALE, sizeof(double)); int q = 0; while (pps[i].kwave_grid[q] < pow(c * grid_encut, 0.5)) { dense_kwave[q] = funcs[k].kwave[q]; q++; } double *smooth_diffwave = inverse_wave_spherical_bessel_transform(d, dense_kwave, funcs[k].l); double **smooth_wave_spline = spline_coeff(dense_wavegrid, smooth_diffwave, DENSE_GRID_SCALE * pps[i].wave_gridsize); free(dense_kwave); double *sdw = (double *)malloc(pps[i].proj_gridsize * sizeof(double)); if (funcs[k].l > 0) sdw[0] = 0; for (int p = 1; p < pps[i].proj_gridsize; p++) { // smooth_grid should be like proj_grid except that rmax should be rmax // of the partial wave difference sdw[p] = wave_interpolate( pps[i].smooth_grid[p], pps[i].wave_gridsize * DENSE_GRID_SCALE, dense_wavegrid, smooth_diffwave, smooth_wave_spline); } if (funcs[k].l == 0) sdw[0] = sdw[1]; double **sdw_spline = spline_coeff(pps[i].smooth_grid, sdw, pps[i].proj_gridsize); funcs[k].smooth_diffwave = sdw; funcs[k].smooth_diffwave_spline = sdw_spline; free(smooth_diffwave); free(smooth_wave_spline[0]); free(smooth_wave_spline[1]); free(smooth_wave_spline[2]); free(smooth_wave_spline); } free_sbt_descriptor(d); pps[i].funcs = funcs; make_pwave_overlap_matrices(pps + i); } mkl_free_buffers(); printf("finished making projector list\n"); return pps; } double *besselt(double *r, double *k, double *f, double encut, int N, int l) { double kmax = pow(encut * c, 0.5); double drho = log(r[1] / r[0]); double kmin = kmax * exp((1 - N) * drho); double *g = (double *)malloc(N * sizeof(double)); CHECK_ALLOCATION(g); for (int i = 0; i < N; i++) { double dr = r[0]; g[i] = 0; k[i] = kmin * exp(i * drho); for (int j = 0; j < N; j++) { g[i] += f[j] * r[j] * sph_bessel(k[i], r[j], l) * dr; if (j != N - 1) dr = r[j + 1] - r[j]; } } return g; } real_proj_site_t *projector_values(int num_sites, int *labels, double *coords, double *lattice, double *reclattice, ppot_t *pps, int *fftg) { real_proj_site_t *sites = (real_proj_site_t *)malloc(num_sites * sizeof(real_proj_site_t)); CHECK_ALLOCATION(sites); int *all_sites = (int *)malloc(num_sites * sizeof(int)); for (int i = 0; i < num_sites; i++) { all_sites[i] = i; } setup_site(sites, pps, num_sites, all_sites, labels, coords, lattice, fftg, 0); free(all_sites); return sites; } real_proj_site_t *smooth_pw_values(int num_N, int *Nlst, int *labels, double *coords, double *lattice, double *reclattice, ppot_t *pps, int *fftg) { real_proj_site_t *sites = (real_proj_site_t *)malloc(num_N * sizeof(real_proj_site_t)); CHECK_ALLOCATION(sites); setup_site(sites, pps, num_N, Nlst, labels, coords, lattice, fftg, 1); return sites; } void onto_projector_helper(band_t *band, double 
complex *x, real_proj_site_t *sites, int num_sites, double *lattice, double *reclattice, double *kpt, int num_cart_gridpts, int *fftg, projection_t *projections) { double dv = determinant(lattice) / fftg[0] / fftg[1] / fftg[2]; double kdotr = 0; double kpt_cart[3] = {0, 0, 0}; kpt_cart[0] = kpt[0]; kpt_cart[1] = kpt[1]; kpt_cart[2] = kpt[2]; frac_to_cartesian(kpt_cart, reclattice); double complex overlap; double complex *values; double complex *xvals = (double complex *)malloc(num_cart_gridpts * sizeof(double complex)); int *indices; int num_indices, index; for (int s = 0; s < num_sites; s++) { num_indices = sites[s].num_indices; indices = sites[s].indices; projections[s].num_projs = sites[s].num_projs; projections[s].total_projs = sites[s].total_projs; projections[s].ns = malloc(sites[s].total_projs * sizeof(int)); projections[s].ls = malloc(sites[s].total_projs * sizeof(int)); projections[s].ms = malloc(sites[s].total_projs * sizeof(int)); projections[s].overlaps = (double complex *)malloc(sites[s].total_projs * sizeof(double complex)); CHECK_ALLOCATION(projections[s].ns); CHECK_ALLOCATION(projections[s].ls); CHECK_ALLOCATION(projections[s].ms); CHECK_ALLOCATION(projections[s].overlaps); for (int i = 0; i < num_indices; i++) { index = indices[i]; kdotr = dot(kpt_cart, sites[s].paths + i * 3); xvals[i] = x[index] * dv * cexp(I * kdotr); } for (int p = 0; p < sites[s].total_projs; p++) { projections[s].ns[p] = sites[s].projs[p].func_num; projections[s].ls[p] = sites[s].projs[p].l; projections[s].ms[p] = sites[s].projs[p].m; values = sites[s].projs[p].values; cblas_zdotc_sub(num_indices, values, 1, xvals, 1, &overlap); projections[s].overlaps[p] = overlap; } } free(xvals); } void get_aug_freqs_helper(band_t *band, double complex *x, real_proj_site_t *sites, int num_sites, double *lattice, double *reclattice, double *kpt, int num_cart_gridpts, int *fftg, projection_t *projections) { double dv = determinant(lattice) / fftg[0] / fftg[1] / fftg[2]; double kdotr = 0; int gridsize = fftg[0] * fftg[1] * fftg[2]; for (int w = 0; w < gridsize; w++) { x[w] = 0; } double kpt_cart[3] = {0, 0, 0}; kpt_cart[0] = kpt[0]; kpt_cart[1] = kpt[1]; kpt_cart[2] = kpt[2]; frac_to_cartesian(kpt_cart, reclattice); double complex overlap; double complex *values; int *indices; int i, j, k, ii, jj, kk; double frac[3]; double phasecoord[3]; double complex phase; int num_indices, index; for (int s = 0; s < num_sites; s++) { num_indices = sites[s].num_indices; indices = sites[s].indices; for (int ind = 0; ind < num_indices; ind++) { index = indices[ind]; i = index / (fftg[1] * fftg[2]); j = index % (fftg[1] * fftg[2]); j = j / (fftg[2]); k = index % (fftg[2]); frac[0] = (double)i / fftg[0]; frac[1] = (double)j / fftg[1]; frac[2] = (double)k / fftg[2]; phasecoord[0] = -sites[s].paths[3 * ind + 0]; // + frac[0]; phasecoord[1] = -sites[s].paths[3 * ind + 1]; // + frac[1]; phasecoord[2] = -sites[s].paths[3 * ind + 2]; // + frac[2]; phase = cexp(I * dot(phasecoord, kpt_cart)); for (int p = 0; p < sites[s].total_projs; p++) { values = sites[s].projs[p].values; x[index] += projections[sites[s].index].overlaps[p] * values[ind] * phase; } } } } void onto_projector(kpoint_t *kpt, int band_num, real_proj_site_t *sites, int num_sites, int *G_bounds, double *lattice, double *reclattice, int num_cart_gridpts, int *fftg) { double *k = kpt->k; int *Gs = kpt->Gs; float complex *Cs = kpt->bands[band_num]->Cs; int num_waves = kpt->num_waves; double complex *x = (double complex *)mkl_calloc(fftg[0] * fftg[1] * fftg[2], sizeof(double 
complex), 64); CHECK_ALLOCATION(x); fft3d(x, G_bounds, lattice, k, Gs, Cs, num_waves, fftg); band_t *band = kpt->bands[band_num]; band->projections = (projection_t *)malloc(num_sites * sizeof(projection_t)); CHECK_ALLOCATION(band->projections); onto_projector_helper(kpt->bands[band_num], x, sites, num_sites, lattice, reclattice, k, num_cart_gridpts, fftg, band->projections); // kpt->bands[band_num]->CRs = x; mkl_free(x); } void onto_projector_ncl(kpoint_t *kpt, int band_num, real_proj_site_t *sites, int num_sites, int *G_bounds, double *lattice, double *reclattice, int num_cart_gridpts, int *fftg) { double *k = kpt->k; int *Gs = kpt->Gs; float complex *Cs = kpt->bands[band_num]->Cs; int num_waves = kpt->num_waves; double complex *xup = (double complex *)mkl_calloc( fftg[0] * fftg[1] * fftg[2], sizeof(double complex), 64); double complex *xdown = (double complex *)mkl_calloc( fftg[0] * fftg[1] * fftg[2], sizeof(double complex), 64); CHECK_ALLOCATION(xup); CHECK_ALLOCATION(xdown); fft3d(xup, G_bounds, lattice, k, Gs, Cs, num_waves / 2, fftg); fft3d(xdown, G_bounds, lattice, k, Gs, Cs + num_waves / 2, num_waves / 2, fftg); band_t *band = kpt->bands[band_num]; band->up_projections = (projection_t *)malloc(num_sites * sizeof(projection_t)); band->down_projections = (projection_t *)malloc(num_sites * sizeof(projection_t)); onto_projector_helper(kpt->bands[band_num], xup, sites, num_sites, lattice, reclattice, k, num_cart_gridpts, fftg, band->up_projections); onto_projector_helper(kpt->bands[band_num], xdown, sites, num_sites, lattice, reclattice, k, num_cart_gridpts, fftg, band->down_projections); mkl_free(xup); mkl_free(xdown); } void onto_smoothpw(kpoint_t *kpt, int band_num, real_proj_site_t *sites, int num_sites, int *G_bounds, double *lattice, double *reclattice, int num_cart_gridpts, int *fftg) { double *k = kpt->k; int *Gs = kpt->Gs; float complex *Cs = kpt->bands[band_num]->Cs; int num_waves = kpt->num_waves; double complex *x = (double complex *)mkl_calloc(fftg[0] * fftg[1] * fftg[2], sizeof(double complex), 64); CHECK_ALLOCATION(x); fft3d(x, G_bounds, lattice, k, Gs, Cs, num_waves, fftg); band_t *band = kpt->bands[band_num]; band->wave_projections = (projection_t *)malloc(num_sites * sizeof(projection_t)); CHECK_ALLOCATION(band->wave_projections); onto_projector_helper(kpt->bands[band_num], x, sites, num_sites, lattice, reclattice, k, num_cart_gridpts, fftg, band->wave_projections); mkl_free(x); } void get_aug_freqs(kpoint_t *kpt, int band_num, real_proj_site_t *sites, int num_sites, int *G_bounds, double *lattice, double *reclattice, int num_cart_gridpts, int *fftg) { if (kpt->bands[band_num]->CAs != NULL) { return; } double *k = kpt->k; int *Gs = kpt->Gs; float complex *Cs = kpt->bands[band_num]->Cs; int num_waves = kpt->num_waves; double complex *x = (double complex *)mkl_calloc(fftg[0] * fftg[1] * fftg[2], sizeof(double complex), 64); CHECK_ALLOCATION(x); band_t *band = kpt->bands[band_num]; get_aug_freqs_helper(kpt->bands[band_num], x, sites, num_sites, lattice, reclattice, k, num_cart_gridpts, fftg, band->projections); band->CAs = (float complex *)mkl_calloc(kpt->num_waves, sizeof(float complex), 64); fwd_fft3d(x, G_bounds, lattice, k, Gs, band->CAs, num_waves, fftg); // for (int w = 0; w < kpt->num_waves; w++) { // band->CAs[w] += band->Cs[w]; // printf("%f %f %f %f\n", creal(band->CAs[w]), // cimag(band->CAs[w]), creal(band->Cs[w]), cimag(band->Cs[w])); //} mkl_free(x); } void add_num_cart_gridpts(ppot_t *pp_ptr, double *lattice, int *fftg) { ppot_t pp = *pp_ptr; double maga1 = 
mag(lattice + 0); double maga2 = mag(lattice + 3); double maga3 = mag(lattice + 6); double vtemp[3]; double vmag, sinphi123; double rmax = pp.rmax; if (pp.wave_grid[pp.wave_gridsize - 1] > rmax) { rmax = pp.wave_grid[pp.wave_gridsize - 1]; } double phi12 = acos(dot(lattice + 0, lattice + 3) / (maga1 * maga2)); vcross(vtemp, lattice + 0, lattice + 3); vmag = mag(vtemp); sinphi123 = dot(lattice + 6, vtemp) / (vmag * maga3); double na1maxA = rmax * fftg[0] / (maga1 * fabs(sin(phi12))) + 1; double na2maxA = rmax * fftg[1] / (maga2 * fabs(sin(phi12))) + 1; double na3maxA = rmax * fftg[2] / (maga3 * fabs(sinphi123)) + 1; int npmaxA = (int)(4.0 / 3.0 * PI * na1maxA * na2maxA * na3maxA) + 1; double phi13 = acos(dot(lattice + 0, lattice + 6) / (maga1 * maga3)); vcross(vtemp, lattice + 0, lattice + 6); vmag = mag(vtemp); sinphi123 = dot(lattice + 3, vtemp) / (vmag * maga2); double na1maxB = rmax * fftg[0] / (maga1 * fabs(sin(phi13))) + 1; double na2maxB = rmax * fftg[1] / (maga2 * fabs(sinphi123)) + 1; double na3maxB = rmax * fftg[2] / (maga3 * fabs(sin(phi13))) + 1; int npmaxB = (int)(4.0 / 3.0 * PI * na1maxB * na2maxB * na3maxB) + 1; double phi23 = acos(dot(lattice + 3, lattice + 6) / (maga2 * maga3)); vcross(vtemp, lattice + 3, lattice + 6); vmag = mag(vtemp); sinphi123 = dot(lattice, vtemp) / (vmag * maga1); double na1maxC = rmax * fftg[0] / (maga1 * fabs(sinphi123)) + 1; double na2maxC = rmax * fftg[1] / (maga2 * fabs(sin(phi23))) + 1; double na3maxC = rmax * fftg[2] / (maga3 * fabs(sin(phi23))) + 1; int npmaxC = (int)(4.0 / 3.0 * PI * na1maxC * na2maxC * na3maxC) + 1; int npmax = npmaxA; if (npmaxB > npmax) npmax = npmaxB; if (npmaxC > npmax) npmax = npmaxC; pp_ptr->num_cart_gridpts = npmax; } void make_pwave_overlap_matrices(ppot_t *pp_ptr) { ppot_t pp = *pp_ptr; int size = pp.num_projs * pp.num_projs; double *psov = (double *)calloc(size, sizeof(double)); double *aeov = (double *)calloc(size, sizeof(double)); double *diov = (double *)calloc(size, sizeof(double)); CHECK_ALLOCATION(psov); CHECK_ALLOCATION(aeov); CHECK_ALLOCATION(diov); for (int i = 0; i < pp.num_projs; i++) { for (int j = i; j < pp.num_projs; j++) { if (pp.funcs[i].l == pp.funcs[j].l) { double *ps1 = pp.funcs[i].pswave; double *ps2 = pp.funcs[j].pswave; double *ae1 = pp.funcs[i].aewave; double *ae2 = pp.funcs[j].aewave; double *psprod = (double *)malloc(pp.wave_gridsize * sizeof(double)); double *aeprod = (double *)malloc(pp.wave_gridsize * sizeof(double)); double *diprod = (double *)malloc(pp.wave_gridsize * sizeof(double)); for (int k = 0; k < pp.wave_gridsize; k++) { psprod[k] = ps1[k] * ps2[k]; aeprod[k] = ae1[k] * ae2[k]; diprod[k] = (ae1[k] - ps1[k]) * (ae2[k] - ps2[k]); } double **psspline = spline_coeff(pp.wave_grid, psprod, pp.wave_gridsize); double **aespline = spline_coeff(pp.wave_grid, aeprod, pp.wave_gridsize); double **displine = spline_coeff(pp.wave_grid, diprod, pp.wave_gridsize); psov[i * pp.num_projs + j] = spline_integral(pp.wave_grid, psprod, psspline, pp.wave_gridsize); aeov[i * pp.num_projs + j] = spline_integral(pp.wave_grid, aeprod, aespline, pp.wave_gridsize); diov[i * pp.num_projs + j] = spline_integral(pp.wave_grid, diprod, displine, pp.wave_gridsize); } } } for (int i = 1; i < pp.num_projs; i++) { for (int j = 0; j < i; j++) { psov[pp.num_projs * i + j] = psov[pp.num_projs * j + i]; aeov[pp.num_projs * i + j] = aeov[pp.num_projs * j + i]; diov[pp.num_projs * i + j] = diov[pp.num_projs * j + i]; } } pp_ptr->pspw_overlap_matrix = psov; pp_ptr->aepw_overlap_matrix = aeov; pp_ptr->diff_overlap_matrix = 
diov; } void setup_projections(pswf_t *wf, ppot_t *pps, int num_elems, int num_sites, int *fftg, int *labels, double *coords) { wf->num_sites = num_sites; wf->fftg = (int *)malloc(3 * sizeof(int)); wf->fftg[0] = fftg[0]; wf->fftg[1] = fftg[1]; wf->fftg[2] = fftg[2]; wf->num_elems = num_elems; wf->num_sites = num_sites; wf->pps = pps; printf("started setup_proj\n"); int num_cart_gridpts = 0; for (int p = 0; p < num_elems; p++) { add_num_cart_gridpts(pps + p, wf->lattice, fftg); if (pps[p].num_cart_gridpts > num_cart_gridpts) { num_cart_gridpts = pps[p].num_cart_gridpts; } } int NUM_KPTS = wf->nwk * wf->nspin; int NUM_BANDS = wf->nband; printf("calculating projector_values\n"); real_proj_site_t *sites = projector_values( num_sites, labels, coords, wf->lattice, wf->reclattice, pps, fftg); printf("onto_projector calcs %d %d\n", NUM_BANDS, NUM_KPTS); #if defined(_OPENMP) omp_set_num_threads(omp_get_max_threads()); #endif #pragma omp parallel for for (int w = 0; w < NUM_BANDS * NUM_KPTS; w++) { kpoint_t *kpt = wf->kpts[w % NUM_KPTS]; int band_num = w / NUM_KPTS; if (wf->is_ncl) { onto_projector_ncl(kpt, band_num, sites, num_sites, wf->G_bounds, wf->lattice, wf->reclattice, num_cart_gridpts, fftg); } else { onto_projector(kpt, band_num, sites, num_sites, wf->G_bounds, wf->lattice, wf->reclattice, num_cart_gridpts, fftg); } } printf("Done \n"); free_real_proj_site_list(sites, num_sites); } void overlap_setup_real(pswf_t *wf_R, pswf_t *wf_S, int *labels_R, int *labels_S, double *coords_R, double *coords_S, int *N_R, int *N_S, int *N_RS_R, int *N_RS_S, int num_N_R, int num_N_S, int num_N_RS) { clean_wave_projections(wf_R); clean_wave_projections(wf_S); wf_R->wp_num = num_N_S; wf_S->wp_num = num_N_R; double complex **overlaps = NULL; if (num_N_RS > 0) { overlaps = (double complex **)malloc(num_N_RS * sizeof(double complex *)); CHECK_ALLOCATION(overlaps); } printf("STARTING OVERLAP_SETUP\n"); int NUM_KPTS = wf_R->nwk * wf_R->nspin; int NUM_BANDS = wf_S->nband; int max_num_indices = 0; if (num_N_R > 0) { real_proj_site_t *sites_N_R = smooth_pw_values(num_N_R, N_R, labels_R, coords_R, wf_S->lattice, wf_S->reclattice, wf_R->pps, wf_S->fftg); for (int s = 0; s < num_N_R; s++) { if (sites_N_R[s].num_indices > max_num_indices) { max_num_indices = sites_N_R[s].num_indices; } } #if defined(_OPENMP) omp_set_num_threads(omp_get_max_threads()); #endif #pragma omp parallel for for (int w = 0; w < NUM_BANDS * NUM_KPTS; w++) { kpoint_t *kpt_S = wf_S->kpts[w % NUM_KPTS]; onto_smoothpw(kpt_S, w / NUM_KPTS, sites_N_R, num_N_R, wf_S->G_bounds, wf_S->lattice, wf_S->reclattice, max_num_indices, wf_S->fftg); } free_real_proj_site_list(sites_N_R, num_N_R); } max_num_indices = 0; printf("PART 1 DONE\n"); if (num_N_S > 0) { real_proj_site_t *sites_N_S = smooth_pw_values(num_N_S, N_S, labels_S, coords_S, wf_R->lattice, wf_R->reclattice, wf_S->pps, wf_R->fftg); NUM_BANDS = wf_R->nband; for (int s = 0; s < num_N_S; s++) { if (sites_N_S[s].num_indices > max_num_indices) { max_num_indices = sites_N_S[s].num_indices; } } #if defined(_OPENMP) omp_set_num_threads(omp_get_max_threads()); #endif #pragma omp parallel for for (int w = 0; w < NUM_BANDS * NUM_KPTS; w++) { kpoint_t *kpt_R = wf_R->kpts[w % NUM_KPTS]; onto_smoothpw(kpt_R, w / NUM_KPTS, sites_N_S, num_N_S, wf_R->G_bounds, wf_R->lattice, wf_R->reclattice, max_num_indices, wf_R->fftg); } free_real_proj_site_list(sites_N_S, num_N_S); } printf("PART 2 DONE\n"); double *dcoords = NULL; if (num_N_RS > 0) { dcoords = (double *)malloc(3 * num_N_RS * sizeof(double)); 
CHECK_ALLOCATION(dcoords); } #if defined(_OPENMP) omp_set_num_threads(omp_get_max_threads()); #endif #pragma omp parallel for for (int i = 0; i < num_N_RS; i++) { double R = 0; int l1, l2; int s1 = N_RS_R[i]; int s2 = N_RS_S[i]; ppot_t pp1 = wf_R->pps[labels_R[s1]]; ppot_t pp2 = wf_S->pps[labels_S[s2]]; // CALCULATE THE DIFF COORD HERE, PASS TO offsite_wave_overlap AND SAVE IT // FOR USE IN compensation_terms overlaps[i] = (double complex *)calloc(pp1.total_projs * pp2.total_projs, sizeof(double complex)); CHECK_ALLOCATION(overlaps[i]); double *coord1 = coords_R + 3 * s1; double *coord2 = coords_S + 3 * s2; min_cart_path(coord2, coord1, wf_R->lattice, dcoords + 3 * i, &R); int tj = 0; for (int j = 0; j < pp1.num_projs; j++) { l1 = pp1.funcs[j].l; for (int m1 = -l1; m1 <= l1; m1++) { int tk = 0; for (int k = 0; k < pp2.num_projs; k++) { l2 = pp2.funcs[k].l; for (int m2 = -l2; m2 <= l2; m2++) { overlaps[i][tj * pp2.total_projs + tk] = conj(reciprocal_offsite_wave_overlap( dcoords + 3 * i, pp1.kwave_grid, pp1.funcs[j].kwave, pp1.funcs[j].kwave_spline, pp1.wave_gridsize, pp2.kwave_grid, pp2.funcs[k].kwave, pp2.funcs[k].kwave_spline, pp2.wave_gridsize, wf_R->lattice, l1, m1, l2, m2)); tk++; } } tj++; } } } wf_S->overlaps = overlaps; wf_S->dcoords = dcoords; wf_S->num_aug_overlap_sites = num_N_RS; wf_R->num_aug_overlap_sites = num_N_RS; printf("PART 3 DONE\nFINISHED OVERLAP SETUP\n"); } void overlap_setup_recip(pswf_t *wf_R, pswf_t *wf_S, int *labels_R, int *labels_S, double *coords_R, double *coords_S, int *N_R, int *N_S, int *N_RS_R, int *N_RS_S, int num_N_R, int num_N_S, int num_N_RS) { clean_wave_projections(wf_R); clean_wave_projections(wf_S); wf_R->wp_num = num_N_S; wf_S->wp_num = num_N_R; double complex **overlaps = NULL; if (num_N_RS > 0) { overlaps = (double complex **)malloc(num_N_RS * sizeof(double complex *)); CHECK_ALLOCATION(overlaps); } printf("STARTING OVERLAP_SETUP RECIP\n"); int NUM_KPTS = wf_R->nwk * wf_R->nspin; int NUM_BANDS = wf_R->nband; int max_num_indices = 0; if (num_N_R > 0) { real_proj_site_t *sites_N_R = smooth_pw_values(num_N_R, N_R, labels_R, coords_R, wf_R->lattice, wf_R->reclattice, wf_R->pps, wf_R->fftg); for (int s = 0; s < num_N_R; s++) { if (sites_N_R[s].num_indices > max_num_indices) { max_num_indices = sites_N_R[s].num_indices; } } #if defined(_OPENMP) omp_set_num_threads(omp_get_max_threads()); #endif #pragma omp parallel for for (int w = 0; w < NUM_BANDS * NUM_KPTS; w++) { kpoint_t *kpt_R = wf_R->kpts[w % NUM_KPTS]; get_aug_freqs(kpt_R, w / NUM_KPTS, sites_N_R, num_N_R, wf_R->G_bounds, wf_R->lattice, wf_R->reclattice, max_num_indices, wf_R->fftg); } free_real_proj_site_list(sites_N_R, num_N_R); } max_num_indices = 0; printf("PART 1 DONE RECIP\n"); if (num_N_S > 0) { real_proj_site_t *sites_N_S = smooth_pw_values(num_N_S, N_S, labels_S, coords_S, wf_S->lattice, wf_S->reclattice, wf_S->pps, wf_S->fftg); NUM_BANDS = wf_S->nband; for (int s = 0; s < num_N_S; s++) { if (sites_N_S[s].num_indices > max_num_indices) { max_num_indices = sites_N_S[s].num_indices; } } #if defined(_OPENMP) omp_set_num_threads(omp_get_max_threads()); #endif #pragma omp parallel for for (int w = 0; w < NUM_BANDS * NUM_KPTS; w++) { kpoint_t *kpt_S = wf_S->kpts[w % NUM_KPTS]; get_aug_freqs(kpt_S, w / NUM_KPTS, sites_N_S, num_N_S, wf_S->G_bounds, wf_S->lattice, wf_S->reclattice, max_num_indices, wf_S->fftg); } free_real_proj_site_list(sites_N_S, num_N_S); } printf("PART 2 DONE RECIP\n"); double *dcoords = NULL; if (num_N_RS > 0) { dcoords = (double *)malloc(3 * num_N_RS * sizeof(double)); 
CHECK_ALLOCATION(dcoords); } #if defined(_OPENMP) omp_set_num_threads(omp_get_max_threads()); #endif #pragma omp parallel for for (int i = 0; i < num_N_RS; i++) { double R = 0; int l1, l2; int s1 = N_RS_R[i]; int s2 = N_RS_S[i]; ppot_t pp1 = wf_R->pps[labels_R[s1]]; ppot_t pp2 = wf_S->pps[labels_S[s2]]; // CALCULATE THE DIFF COORD HERE, PASS TO offsite_wave_overlap AND SAVE IT // FOR USE IN compensation_terms overlaps[i] = (double complex *)calloc(pp1.total_projs * pp2.total_projs, sizeof(double complex)); CHECK_ALLOCATION(overlaps[i]); double *coord1 = coords_R + 3 * s1; double *coord2 = coords_S + 3 * s2; min_cart_path(coord2, coord1, wf_R->lattice, dcoords + 3 * i, &R); int tj = 0; for (int j = 0; j < pp1.num_projs; j++) { l1 = pp1.funcs[j].l; for (int m1 = -l1; m1 <= l1; m1++) { int tk = 0; for (int k = 0; k < pp2.num_projs; k++) { l2 = pp2.funcs[k].l; for (int m2 = -l2; m2 <= l2; m2++) { overlaps[i][tj * pp2.total_projs + tk] = conj(reciprocal_offsite_wave_overlap( dcoords + 3 * i, pp1.kwave_grid, pp1.funcs[j].kwave, pp1.funcs[j].kwave_spline, pp1.wave_gridsize, pp2.kwave_grid, pp2.funcs[k].kwave, pp2.funcs[k].kwave_spline, pp2.wave_gridsize, wf_R->lattice, l1, m1, l2, m2)); tk++; } } tj++; } } } wf_S->overlaps = overlaps; wf_S->dcoords = dcoords; wf_S->num_aug_overlap_sites = num_N_RS; wf_R->num_aug_overlap_sites = num_N_RS; printf("PART 3 DONE RECIP\nFINISHED OVERLAP SETUP\n"); } void compensation_terms(double complex *overlap, int BAND_NUM, pswf_t *wf_S, pswf_t *wf_R, int num_M, int num_N_R, int num_N_S, int num_N_RS, int *M_R, int *M_S, int *N_R, int *N_S, int *N_RS_R, int *N_RS_S, int *proj_labels, double *proj_coords, int *ref_labels, double *ref_coords, int *fft_grid, int flip_spin) { setbuf(stdout, NULL); int NUM_KPTS = wf_R->nwk * wf_R->nspin; int NUM_BANDS = wf_R->nband; // double* overlap = (double*) calloc(2 * NUM_KPTS * NUM_BANDS, // sizeof(double)); CHECK_ALLOCATION(overlap); double complex **N_RS_overlaps = wf_S->overlaps; #if defined(_OPENMP) omp_set_num_threads(omp_get_max_threads()); #endif #pragma omp parallel for for (int w = 0; w < NUM_BANDS * NUM_KPTS; w++) { int ni = 0, nj = 0; int kpt_ind_R = w % NUM_KPTS; if (wf_R->nspin == 2 && flip_spin) { if (kpt_ind_R < wf_R->nwk) { kpt_ind_R += wf_R->nwk; } else { kpt_ind_R -= wf_R->nwk; } } kpoint_t *kpt_R = wf_R->kpts[kpt_ind_R]; kpoint_t *kpt_S = wf_S->kpts[w % NUM_KPTS]; band_t *band_R = kpt_R->bands[w / NUM_KPTS]; band_t *band_S = kpt_S->bands[BAND_NUM]; double complex temp = 0 + 0 * I; for (int s = 0; s < num_M; s++) { ppot_t pp = wf_R->pps[ref_labels[M_R[s]]]; int s1 = M_R[s]; int s2 = M_S[s]; projection_t pron = band_R->projections[s1]; projection_t ppron = band_S->projections[s2]; for (int i = 0; i < pron.total_projs; i++) { for (int j = 0; j < ppron.total_projs; j++) { if (pron.ls[i] == ppron.ls[j] && pron.ms[i] == ppron.ms[j]) { nj = ppron.ns[j]; ni = pron.ns[i]; temp += conj(pron.overlaps[i]) * (pp.aepw_overlap_matrix[pp.num_projs * ni + nj] - pp.pspw_overlap_matrix[pp.num_projs * ni + nj]) * ppron.overlaps[j]; } } } } overlap[w] += temp; // overlap[2*w] = creal(temp); // overlap[2*w+1]= cimag(temp); // printf("temp 1 %lf %lf\n", creal(temp), cimag(temp)); temp = 0 + 0 * I; for (int s = 0; s < num_N_R; s++) { int site_num = N_R[s]; projection_t pron = band_R->projections[site_num]; projection_t ppron = band_S->wave_projections[s]; for (int i = 0; i < pron.total_projs; i++) { temp += ppron.overlaps[i] * conj(pron.overlaps[i]); } } overlap[w] += temp; // overlap[2*w] += creal(temp); // overlap[2*w+1]+= 
cimag(temp); // printf("temp 2 %lf %lf\n", creal(temp), cimag(temp)); temp = 0 + 0 * I; for (int s = 0; s < num_N_S; s++) { int site_num = N_S[s]; projection_t pron = band_R->wave_projections[s]; projection_t ppron = band_S->projections[site_num]; for (int i = 0; i < ppron.total_projs; i++) { temp += conj(pron.overlaps[i]) * ppron.overlaps[i]; } } overlap[w] += temp; // overlap[2*w] += creal(temp); // overlap[2*w+1]+= cimag(temp); // printf("temp 3 %d %d %d %lf %lf\n", kpt_S->num_waves, kpt_R->num_waves, // w%NUM_KPTS, creal(temp), cimag(temp)); temp = 0 + 0 * I; for (int s = 0; s < num_N_RS; s++) { int site_num1 = N_RS_R[s]; int site_num2 = N_RS_S[s]; projection_t pron = band_R->projections[site_num1]; projection_t ppron = band_S->projections[site_num2]; for (int i = 0; i < pron.total_projs; i++) { for (int j = 0; j < ppron.total_projs; j++) { temp += conj(pron.overlaps[i]) * (N_RS_overlaps[s][i * ppron.total_projs + j]) * ppron.overlaps[j] * cexp(2 * I * PI * dot(kpt_R->k, wf_S->dcoords + 3 * s)); } } } overlap[w] += temp; // overlap[2*w] += creal(temp); // overlap[2*w+1]+= cimag(temp); } } void compensation_terms_recip(double complex *overlap, int BAND_NUM, pswf_t *wf_S, pswf_t *wf_R, int num_M, int num_N_R, int num_N_S, int num_N_RS, int *M_R, int *M_S, int *N_R, int *N_S, int *N_RS_R, int *N_RS_S, int *proj_labels, double *proj_coords, int *ref_labels, double *ref_coords, int *fft_grid, int flip_spin) { setbuf(stdout, NULL); int NUM_KPTS = wf_R->nwk * wf_R->nspin; int NUM_BANDS = wf_R->nband; // double* overlap = (double*) calloc(2 * NUM_KPTS * NUM_BANDS, // sizeof(double)); CHECK_ALLOCATION(overlap); double complex **N_RS_overlaps = wf_S->overlaps; #if defined(_OPENMP) omp_set_num_threads(omp_get_max_threads()); #endif #pragma omp parallel for for (int w = 0; w < NUM_BANDS * NUM_KPTS; w++) { int ni = 0, nj = 0; int kpt_ind_R = w % NUM_KPTS; if (wf_R->nspin == 2 && flip_spin) { if (kpt_ind_R < wf_R->nwk) { kpt_ind_R += wf_R->nwk; } else { kpt_ind_R -= wf_R->nwk; } } kpoint_t *kpt_R = wf_R->kpts[kpt_ind_R]; kpoint_t *kpt_S = wf_S->kpts[w % NUM_KPTS]; band_t *band_R = kpt_R->bands[w / NUM_KPTS]; band_t *band_S = kpt_S->bands[BAND_NUM]; float complex *C1s = NULL; float complex *C2s = NULL; float complex curr_overlap; int num_waves; if (band_R->CAs != NULL) { curr_overlap = 0; C1s = band_S->Cs; C2s = band_R->CAs; num_waves = kpt_R->num_waves; cblas_cdotc_sub(num_waves, C2s, 1, C1s, 1, &curr_overlap); overlap[w] += (double complex)curr_overlap; } if (band_S->CAs != NULL) { curr_overlap = 0; C1s = band_S->CAs; C2s = band_R->Cs; num_waves = kpt_R->num_waves; cblas_cdotc_sub(num_waves, C2s, 1, C1s, 1, &curr_overlap); overlap[w] += (double complex)curr_overlap; } // printf("part 1 %d %d %d %lf %lf %f %f\n", BAND_NUM, w/NUM_KPTS, // w%NUM_KPTS, creal(overlap[w]), cimag(overlap[w]), // creal(curr_overlap), // cimag(curr_overlap)); double complex temp = 0 + 0 * I; for (int s = 0; s < num_M; s++) { ppot_t pp = wf_R->pps[ref_labels[M_R[s]]]; int s1 = M_R[s]; int s2 = M_S[s]; projection_t pron = band_R->projections[s1]; projection_t ppron = band_S->projections[s2]; for (int i = 0; i < pron.total_projs; i++) { for (int j = 0; j < ppron.total_projs; j++) { if (pron.ls[i] == ppron.ls[j] && pron.ms[i] == ppron.ms[j]) { nj = ppron.ns[j]; ni = pron.ns[i]; temp += conj(pron.overlaps[i]) * (pp.aepw_overlap_matrix[pp.num_projs * ni + nj] - pp.pspw_overlap_matrix[pp.num_projs * ni + nj]) * ppron.overlaps[j]; } } } } overlap[w] += temp; // printf("part 2 %lf %lf\n", creal(overlap[w]), cimag(overlap[w])); temp 
= 0 + 0 * I; for (int s = 0; s < num_N_RS; s++) { int site_num1 = N_RS_R[s]; int site_num2 = N_RS_S[s]; projection_t pron = band_R->projections[site_num1]; projection_t ppron = band_S->projections[site_num2]; for (int i = 0; i < pron.total_projs; i++) { for (int j = 0; j < ppron.total_projs; j++) { temp += conj(pron.overlaps[i]) * (N_RS_overlaps[s][i * ppron.total_projs + j]) * ppron.overlaps[j] * cexp(2 * I * PI * dot(kpt_R->k, wf_S->dcoords + 3 * s)); } } } overlap[w] += temp; // printf("part 3 %lf %lf\n", creal(overlap[w]), cimag(overlap[w])); // overlap[2*w] += creal(temp); // overlap[2*w+1]+= cimag(temp); } }
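/* Hedged addition (not part of the original file): a plain-C reference for the
   conjugated dot product that cblas_zdotc_sub() computes in
   onto_projector_helper() above, i.e. overlap = sum_i conj(values[i]) * xvals[i].
   Illustrative only; the BLAS call is what the code actually uses. */
#include <complex.h>

static double complex zdotc_ref(int n, const double complex *x,
                                const double complex *y)
{
  double complex acc = 0;
  for (int i = 0; i < n; i++)
    acc += conj(x[i]) * y[i];   /* conjugate the first vector, as ZDOTC does */
  return acc;
}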
scheduled-clause.c
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
/* This fallback only covers omp_get_thread_num(); the rest of the program uses
   the OpenMP runtime API (omp_set_schedule, omp_get_dynamic, ...) and is meant
   to be compiled with OpenMP enabled. */
#define omp_get_thread_num() 0
#endif

int main(int argc, char **argv)
{
  int i, n = 200, chunk, a[n], suma = 0;
  int dyn_var_value, nthreads_var_value, thread_limit_var;
  omp_sched_t kind;
  int modifier = 0;
  int primera = 1;

  if (argc < 3) {
    fprintf(stderr, "\nMissing iteration count or chunk argument\n");
    exit(-1);
  }
  n = atoi(argv[1]);
  if (n > 200) n = 200;
  chunk = atoi(argv[2]);

  for (i = 0; i < n; i++) a[i] = i;

  /* Query run-sched-var first so 'kind' is not printed uninitialized. */
  omp_get_schedule(&kind, &modifier);
  printf(" Before the modification: \n");
  printf(" dyn-var-> %d ;nthreads-var->%d; thread-limit-var->%d; run-sched-var-kind->%d; run-sched-var-modifier->%d; \n",
         omp_get_dynamic(), omp_get_max_threads(), omp_get_thread_limit(), kind, modifier);

  /* Modify the ICVs. */
  omp_set_num_threads(16);
  omp_set_schedule((omp_sched_t) 3, 1);   /* 3 == omp_sched_guided, chunk size 1 */
  omp_set_dynamic(8);                     /* any nonzero value enables dynamic adjustment */

  /* Note: suma is firstprivate/lastprivate, so each thread accumulates into its
     own copy; after the loop, suma holds only the partial sum from the thread
     that executed the last iteration, not the total over all iterations. */
#pragma omp parallel for firstprivate(suma) \
  lastprivate(suma) schedule(static, chunk)
  for (i = 0; i < n; i++) {
#pragma omp critical
    if (primera == 1) {
      primera = 0;
      omp_get_schedule(&kind, &modifier);
      printf(" Inside the parallel region: \n");
      printf(" dyn-var-> %d ;nthreads-var->%d; thread-limit-var->%d; run-sched-var-kind->%d; run-sched-var-modifier->%d; \n",
             omp_get_dynamic(), omp_get_max_threads(), omp_get_thread_limit(), kind, modifier);
    }
#pragma omp atomic
    suma = suma + a[i];
#pragma omp critical
    printf(" thread %d adds a[%d]=%d suma=%d \n", omp_get_thread_num(), i, a[i], suma);
  }
  printf("Outside 'parallel for' suma=%d\n", suma);
  return (0);
}
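/* Hedged addition (not part of the original exercise): the loop above uses
   schedule(static,chunk), so the run-sched-var set with omp_set_schedule() is
   never consulted for it; run-sched-var only controls loops declared with
   schedule(runtime). A minimal sketch of such a loop follows; the function
   name runtime_schedule_demo is illustrative. */
#include <stdio.h>
#include <omp.h>

static void runtime_schedule_demo(int n)
{
  /* Pick the schedule at run time; schedule(runtime) below will use it. */
  omp_set_schedule(omp_sched_dynamic, 4);
#pragma omp parallel for schedule(runtime)
  for (int i = 0; i < n; i++) {
    printf("iteration %d on thread %d\n", i, omp_get_thread_num());
  }
}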
simd2.c
/* { dg-do compile } */ /* { dg-options "-fopenmp" } */ /* { dg-additional-options "-std=c99" { target c } } */ extern int a[13][13][13][13], k, l, m; void foo (int *q, float *p) { int i, j, n, o; #pragma omp simd collapse (4) linear(k : m + 1) aligned(p, q) for (i = 0; i < 13; i++) for (j = 0; j < 13; j++) for (n = 0; n < 13; n++) for (o = 0; o < 13; o += 2) q[k] *= p[k] + 7 * i + 14 * j + 21 * n + 28 * o, k += m + 1; } void bar (float *p) { int i, j, n, o; #pragma omp simd collapse (4) linear(k : m + 1) for (i = 0; i < 13; i++) for (j = 0; j < 13; j++) for (n = 0; n < 13; n++) for (o = 0; o < 13; o += 2) a[i][j][n][o] *= p[k], k += m + 1; }
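/* Hedged addition (not part of the original test): a minimal 1-D illustration
   of the linear(k : step) clause used above. k is privatized; in logical
   iteration i it holds its original value plus i*step, and the explicit update
   inside the loop must match the declared step, as in foo()/bar() above.
   The function name k_linear_demo is illustrative. */
int
k_linear_demo (int n, int step)
{
  int k = 0, sum = 0;
#pragma omp simd linear(k : step) reduction(+:sum)
  for (int i = 0; i < n; i++)
    {
      sum += k;    /* k == i * step in iteration i */
      k += step;   /* update consistent with the declared linear step */
    }
  return sum;      /* == step * n*(n-1)/2 */
}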
GB_unaryop__identity_uint32_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_uint32_int64 // op(A') function: GB_tran__identity_uint32_int64 // C type: uint32_t // A type: int64_t // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint32_t z = (uint32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_uint32_int64 ( uint32_t *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_uint32_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
bfs_replicated.c
/* Copyright (C) 2010 The Trustees of Indiana University. */ /* */ /* Use, modification and distribution is subject to the Boost Software */ /* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */ /* http://www.boost.org/LICENSE_1_0.txt) */ /* */ /* Authors: Jeremiah Willcock */ /* Andrew Lumsdaine */ #define _GNU_SOURCE #include "common.h" #include "oned_csr.h" #include "onesided.h" #include <mpi.h> #include <stdint.h> #include <inttypes.h> #include <stdlib.h> #include <stddef.h> #include <string.h> #include <limits.h> #include <assert.h> static oned_csr_graph g; static unsigned long* g_in_queue; static unsigned long* g_in_queue_summary; static unsigned long* g_out_queue; static unsigned long* g_out_queue_summary; static unsigned long* g_visited; const int ulong_bits = sizeof(unsigned long) * CHAR_BIT; const int ulong_bits_squared = sizeof(unsigned long) * sizeof(unsigned long) * CHAR_BIT * CHAR_BIT; static void allocate_memory(void) { int64_t maxlocalverts = g.max_nlocalverts; int64_t local_queue_summary_size = (maxlocalverts + ulong_bits_squared - 1) / ulong_bits_squared; int64_t local_queue_size = local_queue_summary_size * ulong_bits; int64_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size); int64_t global_queue_size = MUL_SIZE(local_queue_size); g_in_queue = (unsigned long*)xmalloc(global_queue_size * sizeof(unsigned long)); g_in_queue_summary = (unsigned long*)xmalloc(global_queue_summary_size * sizeof(unsigned long)); g_out_queue = (unsigned long*)xmalloc(local_queue_size * sizeof(unsigned long)); g_out_queue_summary = (unsigned long*)xmalloc(local_queue_summary_size * sizeof(unsigned long)); g_visited = (unsigned long*)xmalloc(local_queue_size * sizeof(unsigned long)); } static void deallocate_memory(void) { free(g_in_queue); g_in_queue = NULL; free(g_in_queue_summary); g_in_queue_summary = NULL; free(g_out_queue); g_out_queue = NULL; free(g_out_queue_summary); g_out_queue_summary = NULL; free(g_visited); g_visited = NULL; } void make_graph_data_structure(const tuple_graph* const tg) { convert_graph_to_oned_csr(tg, &g); allocate_memory(); /* Make sure all of the space is available */ deallocate_memory(); } void free_graph_data_structure(void) { free_oned_csr_graph(&g); /* deallocate_memory(); */ } int bfs_writes_depth_map(void) {return 1;} /* This version is the traditional level-synchronized BFS using two queues. A * bitmap is used to indicate which vertices have been visited. Messages are * sent and processed asynchronously throughout the code to hopefully overlap * communication with computation. */ void run_bfs(int64_t root, int64_t* pred) { allocate_memory(); const ptrdiff_t nlocalverts = g.nlocalverts; const size_t* const restrict rowstarts = g.rowstarts; const int64_t* const restrict column = g.column; int64_t maxlocalverts = g.max_nlocalverts; /* Set up the visited bitmap. 
*/ const int ulong_bits = sizeof(unsigned long) * CHAR_BIT; const int ulong_bits_squared = ulong_bits * ulong_bits; int64_t local_queue_summary_size = (maxlocalverts + ulong_bits_squared - 1) / ulong_bits_squared; int64_t local_queue_size = local_queue_summary_size * ulong_bits; int lg_local_queue_size = lg_int64_t(local_queue_size); int64_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size); int64_t global_queue_size = MUL_SIZE(local_queue_size); #define SWIZZLE_VERTEX(c) ((VERTEX_OWNER(c) << lg_local_queue_size) * ulong_bits | VERTEX_LOCAL(c)) #if 0 int64_t* restrict column_swizzled = (int64_t*)xmalloc(nlocaledges * sizeof(int64_t)); { size_t i; for (i = 0; i < nlocaledges; ++i) { int64_t c = column[i]; column_swizzled[i] = SWIZZLE_VERTEX(c); } } #endif unsigned long* restrict in_queue = g_in_queue; memset(in_queue, 0, global_queue_size * sizeof(unsigned long)); unsigned long* restrict in_queue_summary = g_in_queue_summary; memset(in_queue_summary, 0, global_queue_summary_size * sizeof(unsigned long)); unsigned long* restrict out_queue = g_out_queue; unsigned long* restrict out_queue_summary = g_out_queue_summary; unsigned long* restrict visited = g_visited; memset(visited, 0, local_queue_size * sizeof(unsigned long)); #define SET_IN(v) do {int64_t vs = SWIZZLE_VERTEX(v); size_t word_idx = vs / ulong_bits; int bit_idx = vs % ulong_bits; unsigned long mask = (1UL << bit_idx); in_queue_summary[word_idx / ulong_bits] |= (1UL << (word_idx % ulong_bits)); in_queue[word_idx] |= mask;} while (0) #define TEST_IN(vs) (((in_queue_summary[vs / ulong_bits / ulong_bits] & (1UL << ((vs / ulong_bits) % ulong_bits))) != 0) && ((in_queue[vs / ulong_bits] & (1UL << (vs % ulong_bits))) != 0)) #define TEST_VISITED_LOCAL(v) ((visited[(v) / ulong_bits] & (1UL << ((v) % ulong_bits))) != 0) // #define SET_VISITED_LOCAL(v) do {size_t word_idx = (v) / ulong_bits; int bit_idx = (v) % ulong_bits; unsigned long mask = (1UL << bit_idx); __sync_fetch_and_or(&visited[word_idx], mask); __sync_fetch_and_or(&out_queue[word_idx], mask);} while (0) #define SET_VISITED_LOCAL(v) do {size_t word_idx = (v) / ulong_bits; int bit_idx = (v) % ulong_bits; unsigned long mask = (1UL << bit_idx); visited[word_idx] |= mask; out_queue[word_idx] |= mask;} while (0) SET_IN(root); {ptrdiff_t i; _Pragma("omp parallel for schedule(static)") for (i = 0; i < nlocalverts; ++i) pred[i] = -1;} if (VERTEX_OWNER(root) == rank) { pred[VERTEX_LOCAL(root)] = root; SET_VISITED_LOCAL(VERTEX_LOCAL(root)); } uint16_t cur_level = 0; while (1) { ++cur_level; #if 0 if (rank == 0) fprintf(stderr, "BFS level %" PRIu16 "\n", cur_level); #endif memset(out_queue, 0, local_queue_size * sizeof(unsigned long)); // memset(out_queue_summary, 0, local_queue_summary_size * sizeof(unsigned long)); ptrdiff_t i, ii; #if 0 #pragma omp parallel for schedule(static) for (i = 0; i < global_queue_summary_size; ++i) { unsigned long val = 0UL; int j; unsigned long mask = 1UL; for (j = 0; j < ulong_bits; ++j, mask <<= 1) { if (in_queue[i * ulong_bits + j]) val |= mask; } in_queue_summary[i] = val; } #endif unsigned long not_done = 0; #pragma omp parallel for schedule(static) reduction(|:not_done) for (ii = 0; ii < nlocalverts; ii += ulong_bits) { size_t i, i_end = ii + ulong_bits; if (i_end > nlocalverts) i_end = nlocalverts; for (i = ii; i < i_end; ++i) { if (!TEST_VISITED_LOCAL(i)) { size_t j, j_end = rowstarts[i + 1]; for (j = rowstarts[i]; j < j_end; ++j) { int64_t v1 = column[j]; int64_t v1_swizzled = SWIZZLE_VERTEX(v1); if (TEST_IN(v1_swizzled)) { pred[i] = (v1 & 
INT64_C(0xFFFFFFFFFFFF)) | ((int64_t)cur_level << 48); not_done |= 1; SET_VISITED_LOCAL(i); break; } } } } } #if 1 #pragma omp parallel for schedule(static) for (i = 0; i < local_queue_summary_size; ++i) { unsigned long val = 0UL; int j; unsigned long mask = 1UL; for (j = 0; j < ulong_bits; ++j, mask <<= 1) { unsigned long full_val = out_queue[i * ulong_bits + j]; visited[i * ulong_bits + j] |= full_val; if (full_val) val |= mask; } out_queue_summary[i] = val; // not_done |= val; } #endif MPI_Allreduce(MPI_IN_PLACE, &not_done, 1, MPI_UNSIGNED_LONG, MPI_BOR, MPI_COMM_WORLD); if (not_done == 0) break; MPI_Allgather(out_queue, local_queue_size, MPI_UNSIGNED_LONG, in_queue, local_queue_size, MPI_UNSIGNED_LONG, MPI_COMM_WORLD); MPI_Allgather(out_queue_summary, local_queue_summary_size, MPI_UNSIGNED_LONG, in_queue_summary, local_queue_summary_size, MPI_UNSIGNED_LONG, MPI_COMM_WORLD); } deallocate_memory(); } void get_vertex_distribution_for_pred(size_t count, const int64_t* vertex_p, int* owner_p, size_t* local_p) { const int64_t* restrict vertex = vertex_p; int* restrict owner = owner_p; size_t* restrict local = local_p; ptrdiff_t i; #pragma omp parallel for for (i = 0; i < (ptrdiff_t)count; ++i) { int64_t v = vertex[i]; owner[i] = VERTEX_OWNER(v); local[i] = VERTEX_LOCAL(v); } } int64_t vertex_to_global_for_pred(int v_rank, size_t v_local) { return VERTEX_TO_GLOBAL(v_rank, v_local); } size_t get_nlocalverts_for_pred(void) { return g.nlocalverts; }
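/* Hedged addition (not part of the original file): a minimal, self-contained
   sketch of the bit-packed "visited" bitmap technique used by the
   TEST_VISITED_LOCAL / SET_VISITED_LOCAL macros above (one bit per local
   vertex, packed into unsigned longs). The helper names are illustrative. */
#include <limits.h>
#include <stdlib.h>

static const int word_bits = sizeof(unsigned long) * CHAR_BIT;

static unsigned long* bitmap_alloc(size_t nverts) {
  size_t nwords = (nverts + word_bits - 1) / word_bits;
  return calloc(nwords, sizeof(unsigned long));
}

static void bitmap_set(unsigned long* bm, size_t v) {
  bm[v / word_bits] |= 1UL << (v % word_bits);
}

static int bitmap_test(const unsigned long* bm, size_t v) {
  return (bm[v / word_bits] >> (v % word_bits)) & 1UL;
}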
adagrad_op.h
#pragma once #include "caffe2/core/operator.h" namespace caffe2 { template <typename Context> void adagrad_update( int N, const float* g, const float* h, float* ng, float* nh, float epsilon, const float* lr, Context* context) { // TODO(cxj): use OMP when it is reliable // #pragma omp parallel for for (auto i = 0; i < N; ++i) { float gi = g[i]; float hi = nh[i] = h[i] + gi * gi; ng[i] = lr[0] * gi / (sqrt(hi) + epsilon); } } template <typename T, class Context> class AdagradOp final : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; AdagradOp(const OperatorDef& operator_def, Workspace* ws) : Operator<Context>(operator_def, ws), epsilon_(OperatorBase::GetSingleArgument<float>("epsilon", 1e-5)) {} bool RunOnDevice() override { CAFFE_ENFORCE(Input(GRAD).size() == Input(MOMENT_1).size()); Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD)); Output(OUTPUT_MOMENT_1)->ResizeLike(Input(MOMENT_1)); adagrad_update<Context>( Input(GRAD).size(), Input(GRAD).template data<T>(), Input(MOMENT_1).template data<T>(), Output(OUTPUT_GRAD)->template mutable_data<T>(), Output(OUTPUT_MOMENT_1)->template mutable_data<T>(), epsilon_, Input(LR).template data<T>(), &context_); return true; } protected: T epsilon_{1e-8}; INPUT_TAGS(GRAD, MOMENT_1, LR); OUTPUT_TAGS(OUTPUT_GRAD, OUTPUT_MOMENT_1); }; template <typename T, class Context> class SparseAdagradOp final : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; SparseAdagradOp(const OperatorDef& operator_def, Workspace* ws) : Operator<Context>(operator_def, ws), epsilon_(OperatorBase::GetSingleArgument<float>("epsilon", 1e-5)) {} bool RunOnDevice() override { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } template <typename SIndex> bool DoRunWithType() { const auto* lr = Input(LR).template data<T>(); Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD)); Output(OUTPUT_MOMENT_1)->ResizeLike(Input(MOMENT_1)); auto n = Input(GRAD).dim(0); auto block_size = Input(GRAD).size() / n; const auto* indices = Input(INDICES).template data<SIndex>(); const auto* gradIn = Input(GRAD).template data<T>(); const auto* momentIn = Input(MOMENT_1).template data<T>(); auto* gradOut = Output(OUTPUT_GRAD)->template mutable_data<T>(); auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<T>(); // TODO(cxj): use OMP when it is reliable // #pragma omp parallel for for (auto i = 0; i < n; ++i) { auto idx = indices[i]; if (block_size == 1) { float gi = gradIn[i]; float hi = momentOut[idx] = momentIn[idx] + gi * gi; gradOut[i] = lr[0] * gi / (sqrt(hi) + epsilon_); } else { auto offsetI = i * block_size; auto offsetIdx = idx * block_size; adagrad_update( block_size, gradIn + offsetI, momentIn + offsetIdx, gradOut + offsetI, momentOut + offsetIdx, epsilon_, lr, &context_); } } return true; } protected: T epsilon_{1e-8}; INPUT_TAGS(INDICES, GRAD, MOMENT_1, LR); OUTPUT_TAGS(OUTPUT_GRAD, OUTPUT_MOMENT_1); }; }
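/* Hedged addition (not part of the original header): a plain-C sketch of the
   element-wise update that adagrad_update() above computes -- the squared
   gradient is accumulated into the history h, and the output gradient is
   rescaled by lr / (sqrt(history) + epsilon). Names are illustrative. */
#include <math.h>

static void adagrad_update_ref(int n, const float* g, const float* h,
                               float* ng, float* nh, float epsilon, float lr) {
  for (int i = 0; i < n; ++i) {
    float gi = g[i];
    nh[i] = h[i] + gi * gi;                      /* accumulate squared gradients */
    ng[i] = lr * gi / (sqrtf(nh[i]) + epsilon);  /* adjusted (preconditioned) gradient */
  }
}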
jacobi.pluto.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define pmax(x,y) ((x) > (y)? (x) : (y)) #define pmin(x,y) ((x) < (y)? (x) : (y)) #include "smoothers.h" #include <math.h> // cannot be 8 or below #ifndef TS #define TS 16 #endif #ifndef T3 #define T3 64 #endif #ifndef T4 #define T4 256 #endif //void jacobi( GRID &u, GRID &f, GRID &tmp, int nu ) { long long int jacobi( GRID &u, GRID &f, GRID &tmp, int nu ) { int N = u.n; int lda = u.lda; int lda2 = u.lda * u.lda; double hh = u.h*u.h; double invhh = 1.0 / hh; double DinvXomega = hh/6.0 * 8.0/9.0; double* w = &(tmp.p[0][0][0]); double* a = &(u.p[0][0][0]); double* b = &(f.p[0][0][0]); long long int count = 0; #ifdef USE_MM_ALLOC __assume_aligned(w,64); __assume_aligned(a,64); __assume_aligned(b,64); #endif if ((N >= 1) && (nu >= 1)) { for (int t1=-1;t1<=floord(nu-1,TS);t1++) { int lbp=pmax(ceild(t1,2),ceild(TS*t1-nu+2,TS)); int ubp=pmin(floord(nu+N-1,TS),floord((TS/2)*t1+N+(TS/2)-1,TS)); #pragma omp parallel for for (int t2=lbp;t2<=ubp;t2++) { for (int t3=pmax(pmax(0,ceild(t1-1,2)),ceild(TS*t2-N-(T3-2),T3));t3<=pmin(pmin(floord(nu+N-1,T3),floord((TS/2)*t1+N+T3-1,T3)),floord(TS*t2+N+T3-2,T3));t3++) { for (int t4=pmax(pmax(pmax(0,ceild(t1-((T4/4)-1),(T4/4))),ceild(TS*t2-N-(T4-2),T4)),ceild(T3*t3-N-(T4-2),T4));t4<=pmin(pmin(pmin(floord(nu+N-1,T4),floord((TS/2)*t1+N+TS-1,T4)),floord(TS*t2+N+TS-2,T4)),floord(T3*t3+N+T3-2,T4));t4++) { for (int t5=pmax(pmax(pmax(pmax(pmax(0,(TS/2)*t1),TS*t2-N),T3*t3-N),T4*t4-N),TS*t1-TS*t2+1);t5<=pmin(pmin(pmin(pmin(pmin(nu-1,(TS/2)*t1+TS-1),TS*t2+TS-2),T3*t3+T3-2),T4*t4+T4-2),TS*t1-TS*t2+N+(TS-1));t5++) { #pragma loop_count min(1),max(8),avg(4) for (int t6=pmax(pmax(TS*t2,t5+1),-TS*t1+TS*t2+2*t5-(TS-1));t6<=pmin(pmin(TS*t2+TS-1,t5+N),-TS*t1+TS*t2+2*t5);t6++) { #pragma loop_count min(1),max(8),avg(4) for (int t7=pmax(T3*t3,t5+1);t7<=pmin(T3*t3+T3-1,t5+N);t7++) { int lbv= (-t5+t6)*lda2 + (-t5+t7)*lda -t5+pmax(T4*t4,t5+1); int ubv= (-t5+t6)*lda2 + (-t5+t7)*lda -t5+pmin(T4*t4+T4-1,t5+N); int ks = lbv-lda; int kn = lbv+lda; int ke = lbv-1; int kw = lbv+1; int kf = lbv-lda2; int kb = lbv+lda2; if (t5%2==0) { #pragma loop_count min(1),max(64),avg(32) #pragma ivdep #pragma vector always for (int k=lbv;k<=ubv;k++) { w[k] = a[k] - DinvXomega*((6.0*a[k]-a[kw]-a[ke]-a[kn]-a[ks]-a[kf]-a[kb])*invhh - b[k]); kn++; ks++; ke++; kw++; kf++; kb++; //count++; } } else { #pragma loop_count min(1),max(64),avg(32) #pragma ivdep #pragma vector always for (int k=lbv;k<=ubv;k++) { a[k] = w[k] - DinvXomega*((6.0*w[k]-w[kw]-w[ke]-w[kn]-w[ks]-w[kf]-w[kb])*invhh - b[k]); kn++; ks++; ke++; kw++; kf++; kb++; // count++; } } } } } } } } } } if (nu%2==1) { double*** t = u.p; u.p = tmp.p; tmp.p = t; } return count; }
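/* Hedged addition (not part of the original file): an untiled reference sketch
   of the weighted-Jacobi 7-point update that the Pluto-tiled loop nest above
   implements. It assumes the same flattened indexing (lda2 = lda*lda), interior
   points 1..N, and at least one ghost layer on each side (lda >= N+2); it is a
   readability aid, not the production smoother. */
static void jacobi_sweep_naive(int N, int lda, double hh, double DinvXomega,
                               const double *a, const double *b, double *w)
{
  const double invhh = 1.0 / hh;
  const int lda2 = lda * lda;
  for (int i = 1; i <= N; i++)
    for (int j = 1; j <= N; j++)
      for (int m = 1; m <= N; m++) {
        int k = i * lda2 + j * lda + m;
        double lap = (6.0 * a[k] - a[k + 1] - a[k - 1]
                                 - a[k + lda] - a[k - lda]
                                 - a[k + lda2] - a[k - lda2]) * invhh;
        w[k] = a[k] - DinvXomega * (lap - b[k]);
      }
}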
omp_single_nowait.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include "omp_testsuite.h" /* * This test will hang if the nowait is not working properly * * It relies on a one thread skipping to the last single construct to * release the threads in the first three single constructs */ volatile int release; volatile int count; void wait_for_release_then_increment(int rank) { fprintf(stderr, "Thread nr %d enters first section" " and waits.\n", rank); while (release == 0); #pragma omp atomic count++; } void release_and_increment(int rank) { fprintf(stderr, "Thread nr %d sets release to 1\n", rank); release = 1; #pragma omp atomic count++; } int test_omp_single_nowait() { release = 0; count = 0; #pragma omp parallel num_threads(4) { int rank; rank = omp_get_thread_num (); #pragma omp single nowait { wait_for_release_then_increment(rank); } #pragma omp single nowait { wait_for_release_then_increment(rank); } #pragma omp single nowait { wait_for_release_then_increment(rank); } #pragma omp single { release_and_increment(rank); } } // Check to make sure all four singles were executed return (count==4); } /* end of check_single_nowait*/ int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_single_nowait()) { num_failed++; } } return num_failed; }
TaskDispatcher.h
#include "nvtt.h" // OpenMP // http://en.wikipedia.org/wiki/OpenMP #if defined(HAVE_OPENMP) #include <omp.h> #endif // Gran Central Dispatch (GCD/libdispatch) // http://developer.apple.com/mac/library/documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html #if NV_OS_DARWIN && defined(HAVE_DISPATCH_H) //#define HAVE_GCD 1 //#include <dispatch/dispatch.h> #endif // Parallel Patterns Library (PPL) is part of Microsoft's concurrency runtime: // http://msdn.microsoft.com/en-us/library/dd504870.aspx #if NV_OS_WIN32 && _MSC_VER >= 1600 //#define HAVE_PPL 1 #include <ppl.h> #endif // Intel Thread Building Blocks (TBB). // http://www.threadingbuildingblocks.org/ #if defined(HAVE_TBB) #include <tbb/parallel_for.h> #endif #include "nvthread/ParallelFor.h" namespace nvtt { struct SequentialTaskDispatcher : public TaskDispatcher { virtual void dispatch(Task * task, void * context, int count) { for (int i = 0; i < count; i++) { task(context, i); } } }; struct ParallelTaskDispatcher : public TaskDispatcher { virtual void dispatch(Task * task, void * context, int count) { nv::ParallelFor parallelFor(task, context); parallelFor.run(count); // @@ Add support for custom grain. } }; #if defined(HAVE_OPENMP) struct OpenMPTaskDispatcher : public TaskDispatcher { virtual void dispatch(Task * task, void * context, int count) { #pragma omp parallel for for (int i = 0; i < count; i++) { task(context, i); } } }; #endif #if HAVE_GCD // Task dispatcher using Apple's Grand Central Dispatch. struct AppleTaskDispatcher : public TaskDispatcher { // @@ This is really lame, but I refuse to use size_t in the public API. struct BlockContext { Task * task; void * context; }; static void block(void * context, size_t id) { BlockContext * ctx = (BlockContext *)context; ctx->task(ctx->context, int(id)); } virtual void dispatch(Task * task, void * context, int count) { dispatch_queue_t q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); BlockContext blockCtx = { task, context }; dispatch_apply_f(count, q, &blockCtx, block); } }; #endif #if defined(HAVE_PPL) struct TaskFunctor { TaskFunctor(Task * task, void * context) : task(task), context(context) {} void operator()(int n) const { task(context, n); } Task * task; void * context; }; // Task dispatcher using Microsoft's concurrency runtime. struct MicrosoftTaskDispatcher : public TaskDispatcher { virtual void dispatch(Task * task, void * context, int count) { TaskFunctor func(task, context); Concurrency::parallel_for(0, count, func); } }; #endif #if defined(HAVE_TBB) struct TaskFunctor { TaskFunctor(Task * task, void * context) : task(task), context(context) {} void operator()(int & n) const { task(context, n); } Task * task; void * context; }; // Task dispatcher using Inte's Thread Building Blocks. struct IntelTaskDispatcher : public TaskDispatcher { virtual void dispatch(Task * task, void * context, int count) { parallel_for(blocked_range<int>(0, count, 1), TaskFunctor(task, context)); } }; #endif #if defined(HAVE_OPENMP) typedef OpenMPTaskDispatcher ConcurrentTaskDispatcher; #elif defined(HAVE_TBB) typedef IntelTaskDispatcher ConcurrentTaskDispatcher; #elif defined(HAVE_PPL) typedef MicrosoftTaskDispatcher ConcurrentTaskDispatcher; #elif defined(HAVE_GCD) typedef AppleTaskDispatcher ConcurrentTaskDispatcher; #else //typedef SequentialTaskDispatcher ConcurrentTaskDispatcher; typedef ParallelTaskDispatcher ConcurrentTaskDispatcher; #endif } // namespace nvtt
A1_5.c
// gcc -std=c99 -Wall -lm -fopenmp -o go A1_5.c
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>

void display(int, int *);
void swap(int *, int *);

unsigned long long tick(void)
{
  unsigned long long d;
  // Note: the "=A" constraint (EDX:EAX pair) is only valid on 32-bit x86.
  __asm__ __volatile__("rdtsc": "=A"(d));
  return d;
}

int main(int argc, char *argv[])
{
  int n = 10;
  // Allocate room for n ints (the original passed only n bytes to malloc).
  int* v1 = (int *)malloc(n * sizeof(int));
  int* v2 = (int *)malloc(n * sizeof(int));
  for (int i = 0; i < n; ++i)
  {
    v1[i] = rand() % 25; //RAND_MAX;
    v2[i] = v1[i];
  }
  display(n, v1);
  display(n, v2);

  // unsigned long long start = tick();
  // for (int i = n - 1; i > 0; --i)
  // {
  //   int imax = i;
  //   for (int j = 0; j < i; ++j)
  //   {
  //     if (*(v1 + j) > *(v1 + imax))
  //       imax = j;
  //   }
  //   if (imax != i)
  //     swap(v1 + imax, v1 + i);
  // }
  // double t_org = (double)(clock() - start);

  // #pragma omp for
  // for (int i = n - 1; i > 0; --i)
  // {
  //   int imax = i;
  //   // parallelize this for loop
  //   #pragma omp critical
  //   for (int j = 0; j < i; ++j)
  //   {
  //     if (*(v2 + j) > *(v2 + imax))
  //       imax = j;
  //   }
  //   if (imax != i)
  //     swap(v2 + imax, v2 + i);
  // }
  // double t_omp = (double)(clock() - start - t_org);

  // printf("\n ==== ORIGIN ====\n");
  // display(n, v1);
  // printf("\n ==== OPENMP ====\n");
  // display(n, v2);
  // printf("Time-origin: %.2f\nTime-openmp: %.2f\n", t_org, t_omp);

  free(v1);
  free(v2);
  return 0;
}

void display(int n, int *v)
{
  for (int i = 0; i < n; ++i)
    printf("%d\t", v[i]);
  printf("\n");
}

void swap(int *x, int *y)
{
  int z = *x;
  *x = *y;
  *y = z;
}
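/* Hedged addition (not part of the original file): the commented-out attempt
   above wraps the whole inner comparison loop in '#pragma omp critical', which
   serializes it. One correct way to parallelize the max-index search is to let
   each thread keep a local maximum and combine the candidates in a short
   critical section, as sketched below. The function name is illustrative. */
static int max_index_parallel(const int *v, int len)
{
  int imax = 0;
  #pragma omp parallel
  {
    int local_imax = 0;
    #pragma omp for nowait
    for (int j = 1; j < len; ++j)
      if (v[j] > v[local_imax])
        local_imax = j;
    // Combine per-thread candidates; the tie-break keeps the smallest index.
    #pragma omp critical
    if (v[local_imax] > v[imax] ||
        (v[local_imax] == v[imax] && local_imax < imax))
      imax = local_imax;
  }
  return imax;
}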
GB_binop__first_bool.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__first_bool // A.*B function (eWiseMult): GB_AemultB__first_bool // A*D function (colscale): GB_AxD__first_bool // D*A function (rowscale): GB_DxB__first_bool // C+=B function (dense accum): GB_Cdense_accumB__first_bool // C+=b function (dense accum): GB_Cdense_accumb__first_bool // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__first_bool // C=scalar+B GB_bind1st__first_bool // C=scalar+B' GB_bind1st_tran__first_bool // C=A+scalar (none) // C=A'+scalar (none) // C type: bool // A type: bool // B,b type: bool // BinaryOp: cij = aij #define GB_ATYPE \ bool #define GB_BTYPE \ bool #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = x ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FIRST || GxB_NO_BOOL || GxB_NO_FIRST_BOOL) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__first_bool ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__first_bool ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__first_bool ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type bool bool bwork = (*((bool *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__first_bool ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__first_bool ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__first_bool ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__first_bool ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__first_bool ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; bool x = (*((bool *) x_input)) ; bool *Bx = (bool *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; Cx [p] = x ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { bool aij = Ax [p] ; Cx [p] = aij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = x ; \ } GrB_Info GB_bind1st_tran__first_bool ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ bool #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool x = (*((const bool *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ bool } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = Ax [pA] ; \ Cx [pC] = aij ; \ } GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool y = (*((const bool *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
lis_matrix_dia.c
/* Copyright (C) 2002-2012 The SSI Project. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H #include "lis_config.h" #else #ifdef HAVE_CONFIG_WIN32_H #include "lis_config_win32.h" #endif #endif #include <stdio.h> #include <stdlib.h> #ifdef HAVE_MALLOC_H #include <malloc.h> #endif #include <string.h> #include <stdarg.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef USE_MPI #include <mpi.h> #endif #include "lislib.h" /************************************************ * function | SOM | *-----------------------------+-----+ * lis_matrix_set | o | * lis_matrix_setDLU | o | * lis_matrix_malloc | o | * lis_matrix_elements_copy | o | * lis_matrix_transpose | o | * lis_matrix_split | o | * lis_matrix_merge | o | *-----------------------------+-----+-----+ * function |merge|split| *-----------------------------+-----+-----| * lis_matrix_convert | o | | * lis_matrix_copy | o | o | * lis_matrix_get_diagonal | o | o | * lis_matrix_scaling | o | o | * lis_matrix_scaling_symm | o | o | * lis_matrix_normf | o | o | * lis_matrix_sort | o | o | * lis_matrix_solve | xxx | o | * lis_matrix_solvet | xxx | o | ************************************************/ #undef __FUNC__ #define __FUNC__ "lis_matrix_set_dia" LIS_INT lis_matrix_set_dia(LIS_INT nnd, LIS_INT *index, LIS_SCALAR *value, LIS_MATRIX A) { LIS_INT err; LIS_DEBUG_FUNC_IN; #if 0 err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET); if( err ) return err; #else if(lis_matrix_is_assembled(A)) return LIS_SUCCESS; else { err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET); if( err ) return err; } #endif A->index = index; A->value = value; A->is_copy = LIS_FALSE; A->status = -LIS_MATRIX_DIA; A->nnd = nnd; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_setDLU_dia" LIS_INT lis_matrix_setDLU_dia(LIS_INT lnnd, LIS_INT unnd, LIS_SCALAR *diag, LIS_INT *lindex, LIS_SCALAR *lvalue, LIS_INT *uindex, LIS_SCALAR *uvalue, LIS_MATRIX A) { LIS_INT err; LIS_MATRIX_DIAG D; LIS_DEBUG_FUNC_IN; #if 0 err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET); if( err ) return err; #else if(lis_matrix_is_assembled(A)) return LIS_SUCCESS; else 
{ err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET); if( err ) return err; } #endif A->L = (LIS_MATRIX_CORE)lis_calloc(sizeof(struct LIS_MATRIX_CORE_STRUCT),"lis_matrix_setDLU_dia::A->L"); if( A->L==NULL ) { LIS_SETERR_MEM(sizeof(struct LIS_MATRIX_CORE_STRUCT)); return LIS_OUT_OF_MEMORY; } A->U = (LIS_MATRIX_CORE)lis_calloc(sizeof(struct LIS_MATRIX_CORE_STRUCT),"lis_matrix_setDLU_dia::A->U"); if( A->U==NULL ) { LIS_SETERR_MEM(sizeof(struct LIS_MATRIX_CORE_STRUCT)); lis_matrix_DLU_destroy(A); return LIS_OUT_OF_MEMORY; } err = lis_matrix_diag_create(A->n,0,A->comm,&D); if( err ) { lis_matrix_DLU_destroy(A); return err; } lis_free(D->value); D->value = diag; A->D = D; A->L->nnd = lnnd; A->L->index = lindex; A->L->value = lvalue; A->U->nnd = unnd; A->U->index = uindex; A->U->value = uvalue; A->is_copy = LIS_FALSE; A->status = -LIS_MATRIX_DIA; A->is_splited = LIS_TRUE; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_malloc_dia" LIS_INT lis_matrix_malloc_dia(LIS_INT n, LIS_INT nnd, LIS_INT **index, LIS_SCALAR **value) { LIS_DEBUG_FUNC_IN; *index = NULL; *value = NULL; *index = (LIS_INT *)lis_malloc( n*nnd*sizeof(LIS_INT),"lis_matrix_malloc_dia::index" ); if( *index==NULL ) { LIS_SETERR_MEM(n*nnd*sizeof(LIS_INT)); lis_free2(2,*index,*value); return LIS_OUT_OF_MEMORY; } *value = (LIS_SCALAR *)lis_malloc( n*nnd*sizeof(LIS_SCALAR),"lis_matrix_malloc_dia::value" ); if( *value==NULL ) { LIS_SETERR_MEM(n*nnd*sizeof(LIS_SCALAR)); lis_free2(2,*index,*value); return LIS_OUT_OF_MEMORY; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_elements_copy_dia" LIS_INT lis_matrix_elements_copy_dia(LIS_INT n, LIS_INT nnd, LIS_INT *index, LIS_SCALAR *value, LIS_INT *o_index, LIS_SCALAR *o_value) { LIS_INT is,ie; LIS_INT nprocs,my_rank; LIS_DEBUG_FUNC_IN; #ifdef _OPENMP nprocs = omp_get_max_threads(); #else nprocs = 1; #endif memcpy(o_index,index,nnd*sizeof(LIS_INT)); #ifdef _OPENMP #pragma omp parallel private(is,ie,my_rank) #endif { #ifdef _OPENMP my_rank = omp_get_thread_num(); #else my_rank = 0; #endif LIS_GET_ISIE(my_rank,nprocs,n,is,ie) memcpy(&o_value[is*nnd],&value[is*nnd],(ie-is)*nnd*sizeof(LIS_SCALAR)); } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_copy_dia" LIS_INT lis_matrix_copy_dia(LIS_MATRIX Ain, LIS_MATRIX Aout) { LIS_INT err; LIS_INT i,n,nnd,lnnd,unnd; LIS_INT *index; LIS_INT *lindex; LIS_INT *uindex; LIS_SCALAR *value,*lvalue,*uvalue,*diag; LIS_DEBUG_FUNC_IN; n = Ain->n; if( Ain->is_splited ) { lnnd = Ain->L->nnd; unnd = Ain->U->nnd; lindex = NULL; uindex = NULL; diag = NULL; err = lis_matrix_malloc_dia(n,lnnd,&lindex,&lvalue); if( err ) { return err; } err = lis_matrix_malloc_dia(n,unnd,&uindex,&uvalue); if( err ) { lis_free2(5,diag,uindex,lindex,uvalue,lvalue); return err; } diag = (LIS_SCALAR *)lis_malloc(n*sizeof(LIS_SCALAR),"lis_matrix_copy_dia::diag"); if( diag==NULL ) { lis_free2(5,diag,uindex,lindex,uvalue,lvalue); return err; } #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0;i<n;i++) { diag[i] = Ain->D->value[i]; } lis_matrix_elements_copy_dia(n,lnnd,Ain->L->index,Ain->L->value,lindex,lvalue); lis_matrix_elements_copy_dia(n,unnd,Ain->U->index,Ain->U->value,uindex,uvalue); err = lis_matrix_setDLU_dia(lnnd,unnd,diag,lindex,lvalue,uindex,uvalue,Aout); if( err ) { lis_free2(5,diag,uindex,lindex,uvalue,lvalue); return err; } } if( !Ain->is_splited || (Ain->is_splited && Ain->is_save) ) { index = NULL; value = NULL; nnd = Ain->nnd; err = 
lis_matrix_malloc_dia(n,nnd,&index,&value); if( err ) { return err; } lis_matrix_elements_copy_dia(n,nnd,Ain->index,Ain->value,index,value); err = lis_matrix_set_dia(nnd,index,value,Aout); if( err ) { lis_free2(2,index,value); return err; } } err = lis_matrix_assemble(Aout); if( err ) { lis_matrix_storage_destroy(Aout); return err; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_get_diagonal_dia" LIS_INT lis_matrix_get_diagonal_dia(LIS_MATRIX A, LIS_SCALAR d[]) { LIS_INT i,j; LIS_INT n,nnd; #ifdef _OPENMP LIS_INT is,ie,my_rank,nprocs; #endif LIS_DEBUG_FUNC_IN; n = A->n; nnd = A->nnd; if( A->is_splited ) { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { d[i] = A->D->value[i]; } } else { #ifdef _OPENMP nprocs = omp_get_max_threads(); for(j=0;j<nnd;j++) { if( A->index[j]==0 ) break; } #pragma omp parallel private(is,ie,my_rank) { my_rank = omp_get_thread_num(); LIS_GET_ISIE(my_rank,nprocs,n,is,ie); memcpy(&d[is],&A->value[is*nnd+j*(ie-is)],(ie-is)*sizeof(LIS_SCALAR)); } #else for(j=0;j<nnd;j++) { if( A->index[j]==0 ) break; } for(i=0;i<n;i++) { d[i] = A->value[j*n+i]; } #endif } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_scaling_dia" LIS_INT lis_matrix_scaling_dia(LIS_MATRIX A, LIS_SCALAR d[]) { LIS_INT i,j,js,je,jj; LIS_INT n,np,nnd; #ifdef _OPENMP LIS_INT k,is,ie,ii; LIS_INT my_rank,nprocs; #endif LIS_DEBUG_FUNC_IN; n = A->n; np = A->np; if( A->is_splited ) { #ifdef _OPENMP #pragma omp parallel private(i,j,k,is,ie,jj,js,je,ii,my_rank) { nprocs = omp_get_max_threads(); my_rank = omp_get_thread_num(); LIS_GET_ISIE(my_rank,nprocs,n,is,ie) for(i=is;i<ie;i++) { A->D->value[i] = 1.0; } for(j=0;j<A->L->nnd;j++) { jj = A->L->index[j]; js = _max(is,-jj); #ifdef USE_MPI je = jj<=(np-n)?ie:_min(ie,np-jj); #else je = _min(ie,n-jj); #endif k = is*A->L->nnd + j*(ie-is); ii = js-is; for(i=js;i<je;i++) { A->L->value[k + ii++] *= d[i]; } } for(j=0;j<A->U->nnd;j++) { jj = A->U->index[j]; js = _max(is,-jj); #ifdef USE_MPI je = jj<=(np-n)?ie:_min(ie,np-jj); #else je = _min(ie,n-jj); #endif k = is*A->U->nnd + j*(ie-is); ii = js-is; for(i=js;i<je;i++) { A->U->value[k + ii++] *= d[i]; } } } #else for(i=0;i<n;i++) { A->D->value[i] = 1.0; } for(j=0;j<A->L->nnd;j++) { jj = A->L->index[j]; js = _max(0,-jj); #ifdef USE_MPI je = jj<=(np-n)?n:_min(n,np-jj); #else je = _min(n,n-jj); #endif for(i=js;i<je;i++) { A->L->value[j*n + i] *= d[i]; } } for(j=0;j<A->U->nnd;j++) { jj = A->U->index[j]; js = _max(0,-jj); #ifdef USE_MPI je = jj<=(np-n)?n:_min(n,np-jj); #else je = _min(n,n-jj); #endif for(i=js;i<je;i++) { A->U->value[j*n + i] *= d[i]; } } #endif } else { nnd = A->nnd; #ifdef _OPENMP #pragma omp parallel private(i,j,k,is,ie,jj,js,je,ii,my_rank) { nprocs = omp_get_max_threads(); my_rank = omp_get_thread_num(); LIS_GET_ISIE(my_rank,nprocs,n,is,ie) for(j=0;j<nnd;j++) { jj = A->index[j]; js = _max(is,-jj); #ifdef USE_MPI je = jj<=(np-n)?ie:_min(ie,np-jj); #else je = _min(ie,n-jj); #endif k = is*nnd + j*(ie-is); ii = js-is; for(i=js;i<je;i++) { A->value[k + ii] *= d[i]; ii++; } } } #else for(j=0;j<nnd;j++) { jj = A->index[j]; js = _max(0,-jj); #ifdef USE_MPI je = jj<=(np-n)?n:_min(n,np-jj); #else je = _min(n,n-jj); #endif for(i=js;i<je;i++) { A->value[j*n + i] *= d[i]; } } #endif } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_scaling_symm_dia" LIS_INT lis_matrix_scaling_symm_dia(LIS_MATRIX A, LIS_SCALAR d[]) { LIS_INT i,j,js,je,jj; LIS_INT n,np,nnd; #ifdef _OPENMP 
LIS_INT k,is,ie,ii; LIS_INT my_rank,nprocs; #endif LIS_DEBUG_FUNC_IN; n = A->n; np = A->np; nnd = A->nnd; if( A->is_splited ) { #ifdef _OPENMP #pragma omp parallel private(i,j,k,is,ie,jj,js,je,ii,my_rank) { nprocs = omp_get_max_threads(); my_rank = omp_get_thread_num(); LIS_GET_ISIE(my_rank,nprocs,n,is,ie) for(i=is;i<ie;i++) { A->D->value[i] = 1.0; } for(j=0;j<A->L->nnd;j++) { jj = A->L->index[j]; js = _max(is,-jj); #ifdef USE_MPI je = jj<=(np-n)?ie:_min(ie,np-jj); #else je = _min(ie,n-jj); #endif k = is*A->L->nnd + j*(ie-is); ii = js-is; for(i=js;i<je;i++) { A->L->value[k + ii++] *= d[i]*d[i+A->L->index[j]];; } } for(j=0;j<A->U->nnd;j++) { jj = A->U->index[j]; js = _max(is,-jj); #ifdef USE_MPI je = jj<=(np-n)?ie:_min(ie,np-jj); #else je = _min(ie,n-jj); #endif k = is*A->U->nnd + j*(ie-is); ii = js-is; for(i=js;i<je;i++) { A->U->value[k + ii++] *= d[i]*d[i+A->U->index[j]];; } } } #else for(i=0;i<n;i++) { A->D->value[i] = 1.0; } for(j=0;j<A->L->nnd;j++) { jj = A->L->index[j]; js = _max(0,-jj); #ifdef USE_MPI je = jj<=(np-n)?n:_min(n,np-jj); #else je = _min(n,n-jj); #endif for(i=js;i<je;i++) { A->L->value[j*n + i] *= d[i]*d[i+A->L->index[j]];; } } for(j=0;j<A->U->nnd;j++) { jj = A->U->index[j]; js = _max(0,-jj); #ifdef USE_MPI je = jj<=(np-n)?n:_min(n,np-jj); #else je = _min(n,n-jj); #endif for(i=js;i<je;i++) { A->U->value[j*n + i] *= d[i]*d[i+A->U->index[j]];; } } #endif } else { nnd = A->nnd; #ifdef _OPENMP #pragma omp parallel private(i,j,k,is,ie,jj,js,je,ii,my_rank) { nprocs = omp_get_max_threads(); my_rank = omp_get_thread_num(); LIS_GET_ISIE(my_rank,nprocs,n,is,ie) for(j=0;j<nnd;j++) { jj = A->index[j]; js = _max(is,-jj); #ifdef USE_MPI je = jj<=(np-n)?ie:_min(ie,np-jj); #else je = _min(ie,n-jj); #endif k = is*nnd + j*(ie-is); ii = js-is; for(i=js;i<je;i++) { A->value[k + ii] *= d[i]*d[i+A->index[j]]; ii++; } } } #else for(j=0;j<nnd;j++) { jj = A->index[j]; js = _max(0,-jj); #ifdef USE_MPI je = jj<=(np-n)?n:_min(n,np-jj); #else je = _min(n,n-jj); #endif for(i=js;i<je;i++) { A->value[j*n + i] *= d[i]*d[i+A->index[j]]; } } #endif } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_normf_dia" LIS_INT lis_matrix_normf_dia(LIS_MATRIX A, LIS_SCALAR *nrm) { LIS_INT i,j; LIS_INT n; LIS_SCALAR sum; LIS_DEBUG_FUNC_IN; n = A->n; sum = (LIS_SCALAR)0; if( A->is_splited ) { #ifdef _OPENMP #pragma omp parallel for reduction(+:sum) private(i,j) #endif for(i=0; i<n; i++) { sum += A->D->value[i]*A->D->value[i]; for(j=A->L->index[i];j<A->L->index[i+1];j++) { sum += A->L->value[j]*A->L->value[j]; } for(j=A->U->index[i];j<A->U->index[i+1];j++) { sum += A->U->value[j]*A->U->value[j]; } } } else { #ifdef _OPENMP #pragma omp parallel for reduction(+:sum) private(i,j) #endif for(i=0; i<n; i++) { sum += A->value[i]*A->value[i]; for(j=A->index[i];j<A->index[i+1];j++) { sum += A->value[j]*A->value[j]; } } } *nrm = sqrt(sum); LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_transpose_dia" LIS_INT lis_matrix_transpose_dia(LIS_MATRIX Ain, LIS_MATRIX *Aout) { LIS_DEBUG_FUNC_IN; /* err = lis_matrix_convert_dia2ccs(Ain,Aout);*/ (*Aout)->matrix_type = LIS_MATRIX_DIA; (*Aout)->status = LIS_MATRIX_DIA; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_split_dia" LIS_INT lis_matrix_split_dia(LIS_MATRIX A) { LIS_INT i,j,n,nnd; LIS_INT lnnd,unnd; #ifdef _OPENMP LIS_INT kl,ku,nprocs,my_rank,is,ie; #endif LIS_INT err; LIS_INT *lindex,*uindex; LIS_SCALAR *lvalue,*uvalue; LIS_MATRIX_DIAG D; LIS_DEBUG_FUNC_IN; n = A->n; 
nnd = A->nnd; lnnd = 0; unnd = 0; D = NULL; lindex = NULL; lvalue = NULL; uindex = NULL; uvalue = NULL; for(j=0;j<nnd;j++) { if( A->index[j]<0 ) { lnnd++; } else if( A->index[j]>0 ) { unnd++; } } err = lis_matrix_LU_create(A); if( err ) { return err; } err = lis_matrix_malloc_dia(n,lnnd,&lindex,&lvalue); if( err ) { return err; } err = lis_matrix_malloc_dia(n,unnd,&uindex,&uvalue); if( err ) { lis_free2(4,lindex,lvalue,uindex,uvalue); return err; } err = lis_matrix_diag_duplicateM(A,&D); if( err ) { lis_free2(4,lindex,lvalue,uindex,uvalue); return err; } #ifdef _OPENMP kl = 0; ku = 0; nprocs = omp_get_max_threads(); for(j=0;j<nnd;j++) { if( A->index[j]<0 ) { lindex[kl] = A->index[j]; #pragma omp parallel private(i,is,ie,my_rank) { my_rank = omp_get_thread_num(); LIS_GET_ISIE(my_rank,nprocs,n,is,ie); memcpy(&lvalue[is*lnnd+kl*(ie-is)],&A->value[is*nnd+j*(ie-is)],(ie-is)*sizeof(LIS_SCALAR)); } kl++; } else if( A->index[j]>0 ) { uindex[ku] = A->index[j]; #pragma omp parallel private(i,is,ie,my_rank) { my_rank = omp_get_thread_num(); LIS_GET_ISIE(my_rank,nprocs,n,is,ie); memcpy(&uvalue[is*unnd+ku*(ie-is)],&A->value[is*nnd+j*(ie-is)],(ie-is)*sizeof(LIS_SCALAR)); } ku++; } else { #pragma omp parallel private(i,is,ie,my_rank) { my_rank = omp_get_thread_num(); LIS_GET_ISIE(my_rank,nprocs,n,is,ie); for(i=is;i<ie;i++) { D->value[i] = A->value[is*nnd+j*(ie-is)+i-is]; } } } } #else lnnd = 0; unnd = 0; for(j=0;j<nnd;j++) { if( A->index[j]<0 ) { lindex[lnnd] = A->index[j]; for(i=0;i<n;i++) { lvalue[lnnd*n+i] = A->value[j*n+i]; } lnnd++; } else if( A->index[j]>0 ) { uindex[unnd] = A->index[j]; for(i=0;i<n;i++) { uvalue[unnd*n+i] = A->value[j*n+i]; } unnd++; } else { for(i=0;i<n;i++) { D->value[i] = A->value[j*n+i]; } } } #endif A->L->nnd = lnnd; A->L->index = lindex; A->L->value = lvalue; A->U->nnd = unnd; A->U->index = uindex; A->U->value = uvalue; A->D = D; A->is_splited = LIS_TRUE; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_merge_dia" LIS_INT lis_matrix_merge_dia(LIS_MATRIX A) { LIS_INT i,j,k,n,is; #ifdef _OPENMP LIS_INT nprocs,my_rank,ie; #endif LIS_INT nnd; LIS_INT err; LIS_INT *index; LIS_SCALAR *value; LIS_DEBUG_FUNC_IN; n = A->n; is = A->is; index = NULL; value = NULL; nnd = A->L->nnd + A->U->nnd + 1; err = lis_matrix_malloc_dia(n,nnd,&index,&value); if( err ) { return err; } #ifdef _OPENMP nprocs = omp_get_max_threads(); k = 0; for(j=0;j<A->L->nnd;j++) { index[k] = A->L->index[j]; #pragma omp parallel private(i,is,ie,my_rank) { my_rank = omp_get_thread_num(); LIS_GET_ISIE(my_rank,nprocs,n,is,ie); memcpy(&value[is*nnd+k*(ie-is)],&A->L->value[is*A->L->nnd+j*(ie-is)],(ie-is)*sizeof(LIS_SCALAR)); } k++; } index[k] = 0; #pragma omp parallel private(i,is,ie,my_rank) { my_rank = omp_get_thread_num(); LIS_GET_ISIE(my_rank,nprocs,n,is,ie); memcpy(&value[is*nnd+k*(ie-is)],&A->D->value[is],(ie-is)*sizeof(LIS_SCALAR)); } k++; for(j=0;j<A->U->nnd;j++) { index[k] = A->U->index[j]; #pragma omp parallel private(i,is,ie,my_rank) { my_rank = omp_get_thread_num(); LIS_GET_ISIE(my_rank,nprocs,n,is,ie); memcpy(&value[is*nnd+k*(ie-is)],&A->U->value[is*A->U->nnd+j*(ie-is)],(ie-is)*sizeof(LIS_SCALAR)); } k++; } #else k = 0; for(j=0;j<A->L->nnd;j++) { index[k] = A->L->index[j]; for(i=0;i<n;i++) { value[k*n+i] = A->L->value[j*n+i]; } k++; } index[k] = 0; for(i=0;i<n;i++) { value[k*n+i] = A->D->value[i]; } k++; for(j=0;j<A->U->nnd;j++) { index[k] = A->U->index[j]; for(i=0;i<n;i++) { value[k*n+i] = A->U->value[j*n+i]; } k++; } #endif A->nnd = nnd; A->value = value; A->index = 
index; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_sort_dia" LIS_INT lis_matrix_sort_dia(LIS_MATRIX A) { LIS_INT i,n; LIS_DEBUG_FUNC_IN; if( !A->is_sorted ) { n = A->n; if( A->is_splited ) { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0;i<n;i++) { lis_sort_id(A->L->ptr[i],A->L->ptr[i+1]-1,A->L->index,A->L->value); lis_sort_id(A->U->ptr[i],A->U->ptr[i+1]-1,A->U->index,A->U->value); } } else { #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0;i<n;i++) { lis_sort_id(A->ptr[i],A->ptr[i+1]-1,A->index,A->value); } } A->is_sorted = LIS_TRUE; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_solve_dia" LIS_INT lis_matrix_solve_dia(LIS_MATRIX A, LIS_VECTOR B, LIS_VECTOR X, LIS_INT flag) { LIS_INT i,j,n; LIS_SCALAR t; LIS_SCALAR *b,*x; LIS_DEBUG_FUNC_IN; n = A->n; b = B->value; x = X->value; switch(flag) { case LIS_MATRIX_LOWER: for(i=0;i<n;i++) { t = b[i]; for(j=0;j<A->L->nnd;j++) { if( i+A->L->index[j] >= 0 ) t -= A->L->value[j*n + i] * x[i + A->L->index[j]]; } x[i] = t * A->WD->value[i]; } break; case LIS_MATRIX_UPPER: for(i=n-1;i>=0;i--) { t = b[i]; for(j=0;j<A->U->nnd;j++) { if( i+A->U->index[j] < n ) t -= A->U->value[j*n + i] * x[i + A->U->index[j]]; } x[i] = t * A->WD->value[i]; } break; case LIS_MATRIX_SSOR: for(i=0;i<n;i++) { t = b[i]; for(j=0;j<A->L->nnd;j++) { if( i+A->L->index[j] >= 0 ) t -= A->L->value[j*n + i] * x[i + A->L->index[j]]; } x[i] = t * A->WD->value[i]; } for(i=n-1;i>=0;i--) { t = 0.0; for(j=0;j<A->U->nnd;j++) { if( i+A->U->index[j] < n ) t += A->U->value[j*n + i] * x[i + A->U->index[j]]; } x[i] -= t * A->WD->value[i]; } break; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_solvet_dia" LIS_INT lis_matrix_solvet_dia(LIS_MATRIX A, LIS_VECTOR B, LIS_VECTOR X, LIS_INT flag) { LIS_INT i,j,n; LIS_SCALAR t; LIS_SCALAR *b,*x; LIS_DEBUG_FUNC_IN; n = A->n; b = B->value; x = X->value; lis_vector_copy(B,X); switch(flag) { case LIS_MATRIX_LOWER: for(i=0;i<n;i++) { x[i] = x[i] * A->WD->value[i]; for(j=0;j<A->U->nnd;j++) { if( i+A->U->index[j] < n ) x[i + A->U->index[j]] -= A->U->value[j*n + i] * x[i]; } } break; case LIS_MATRIX_UPPER: for(i=n-1;i>=0;i--) { x[i] = x[i] * A->WD->value[i]; for(j=0;j<A->L->nnd;j++) { if( i+A->L->index[j] >= 0 ) x[i + A->L->index[j]] -= A->L->value[j*n + i] * x[i]; } } break; case LIS_MATRIX_SSOR: for(i=0;i<n;i++) { t = x[i] * A->WD->value[i]; for(j=0;j<A->U->nnd;j++) { if( i+A->U->index[j] < n ) x[i + A->U->index[j]] -= A->U->value[j*n + i] * t; } } for(i=n-1;i>=0;i--) { x[i] = x[i] * A->WD->value[i]; t = x[i]; for(j=0;j<A->L->nnd;j++) { if( i+A->L->index[j] >= 0 ) x[i + A->L->index[j]] -= A->L->value[j*n + i] * t; } } break; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_convert_crs2dia" LIS_INT lis_matrix_convert_crs2dia(LIS_MATRIX Ain, LIS_MATRIX Aout) { LIS_INT i,j,jj,k; LIS_INT err; LIS_INT n,nnz,nnd,nprocs,my_rank; LIS_INT is,ie; LIS_INT *iw; LIS_INT *index; LIS_SCALAR *value; LIS_DEBUG_FUNC_IN; n = Ain->n; nnz = Ain->nnz; index = NULL; value = NULL; iw = NULL; iw = (LIS_INT *)lis_malloc( nnz*sizeof(LIS_INT),"lis_matrix_convert_crs2dia::iw" ); if( iw==NULL ) { LIS_SETERR_MEM(nnz*sizeof(LIS_INT)); return LIS_ERR_OUT_OF_MEMORY; } lis_matrix_sort_crs(Ain); #ifdef _OPENMP #pragma omp parallel for private(i,j) #endif for(i=0;i<n;i++) { for(j=Ain->ptr[i];j<Ain->ptr[i+1];j++) { iw[j] = Ain->index[j] - i; } } lis_sort_i(0,nnz-1,iw); nnd = 1; k = iw[0]; 
for(i=1;i<nnz;i++) { if( k!=iw[i] ) { k = iw[i]; nnd++; } } err = lis_matrix_malloc_dia(n,nnd,&index,&value); if( err ) { lis_free(iw); return err; } /* convert dia */ k = iw[0]; index[0] = k; j = 1; for(i=1;i<nnz;i++) { if( k!=iw[i] ) { k = iw[i]; index[j] = k; j++; } } #ifdef _OPENMP #pragma omp parallel private(i,j,k,is,ie,jj,my_rank) #endif { #ifdef _OPENMP my_rank = omp_get_thread_num(); nprocs = omp_get_max_threads(); #else my_rank = 0; nprocs = 1; #endif LIS_GET_ISIE(my_rank,nprocs,n,is,ie); memset(&value[is*nnd],0,(ie-is)*nnd*sizeof(LIS_SCALAR)); for(i=is;i<ie;i++) { k = 0; for(j=Ain->ptr[i];j<Ain->ptr[i+1];j++) { jj=Ain->index[j] - i; while( jj!=index[k] ) k++; value[is*nnd + k*(ie-is) + i-is] = Ain->value[j]; } } } err = lis_matrix_set_dia(nnd,index,value,Aout); if( err ) { lis_free2(3,index,value,iw); return err; } err = lis_matrix_assemble(Aout); if( err ) { lis_free(iw); lis_matrix_storage_destroy(Aout); return err; } lis_free(iw); LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_matrix_convert_dia2crs" LIS_INT lis_matrix_convert_dia2crs(LIS_MATRIX Ain, LIS_MATRIX Aout) { LIS_INT i,j,jj,k,l; LIS_INT err,js,je; LIS_INT n,np,nnz,nnd,is,ie,nprocs,my_rank; LIS_INT *iw; LIS_INT *ptr,*index; LIS_SCALAR *value; LIS_DEBUG_FUNC_IN; n = Ain->n; np = Ain->np; nnd = Ain->nnd; is = Ain->is; ie = Ain->ie; ptr = NULL; index = NULL; value = NULL; iw = NULL; iw = (LIS_INT *)lis_malloc( (n+1)*sizeof(LIS_INT),"lis_matrix_convert_dia2crs::iw" ); if( iw==NULL ) { LIS_SETERR_MEM((n+1)*sizeof(LIS_INT)); return LIS_ERR_OUT_OF_MEMORY; } iw[0] = 0; #ifdef _OPENMP #pragma omp parallel private(i,j,k,is,ie,js,je,jj,my_rank) #endif { #ifdef _OPENMP nprocs = omp_get_max_threads(); my_rank = omp_get_thread_num(); #else my_rank = 0; nprocs = 1; #endif LIS_GET_ISIE(my_rank,nprocs,n,is,ie); memset(&iw[is+1],0,(ie-is)*sizeof(LIS_INT)); k = ie-is; for(j=0;j<nnd;j++) { jj = Ain->index[j]; js = _max(is,-jj) -is; #ifdef USE_MPI je = jj<=(np-n)?ie:_min(ie,np-jj); #else je = _min(ie,n-jj); #endif je -= is; for(i=js;i<je;i++) { if( Ain->value[is*nnd + j*k + i]!=(LIS_SCALAR)0.0 ) { iw[i+is+1]++; } } } } for(i=0;i<n;i++) { iw[i+1] += iw[i]; } nnz = iw[n]; err = lis_matrix_malloc_crs(n,nnz,&ptr,&index,&value); if( err ) { lis_free2(4,ptr,index,value,iw); return err; } /* convert crs */ ptr[0] = 0; #ifdef _OPENMP #pragma omp parallel private(i,j,k,l,is,ie,js,je,jj,my_rank) #endif { #ifdef _OPENMP nprocs = omp_get_max_threads(); my_rank = omp_get_thread_num(); #else my_rank = 0; nprocs = 1; #endif LIS_GET_ISIE(my_rank,nprocs,n,is,ie); l = ie-is; for(i=is;i<ie;i++) { ptr[i+1] = iw[i+1]; } for(j=0;j<nnd;j++) { jj = Ain->index[j]; js = _max(is,-jj) - is; #ifdef USE_MPI je = jj<=(np-n)?ie:_min(ie,np-jj); #else je = _min(ie,n-jj); #endif je -= is; for(i=js;i<je;i++) { if( Ain->value[is*nnd + j*l + i]!=(LIS_SCALAR)0.0 ) { k = iw[i+is]++; value[k] = Ain->value[is*nnd + j*l + i]; index[k] = i+is + jj; } } } } err = lis_matrix_set_crs(nnz,ptr,index,value,Aout); if( err ) { lis_free2(4,ptr,index,value,iw); return err; } err = lis_matrix_assemble(Aout); if( err ) { lis_free(iw); lis_matrix_storage_destroy(Aout); return err; } lis_free(iw); LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; }
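/*
 * A minimal sketch (not part of lis_matrix_dia.c) of the DIA storage convention
 * used by the sequential (non-OpenMP) branches above: diagonal j has column
 * offset index[j], and the element at (i, i + index[j]) is stored at
 * value[j*n + i].  The matrix-vector product below only illustrates that
 * indexing; the real library routines also handle the blocked threaded layout
 * and the MPI overlap region.
 */
static void dia_matvec_sketch(int n, int nnd, const int *index,
                              const double *value, const double *x, double *y)
{
    #pragma omp parallel for
    for (int i = 0; i < n; i++) {
        double t = 0.0;
        for (int j = 0; j < nnd; j++) {
            int col = i + index[j];              /* column hit by diagonal j in row i */
            if (col >= 0 && col < n) {
                t += value[j*n + i] * x[col];
            }
        }
        y[i] = t;                                /* each thread writes only its own rows */
    }
}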
pi_parallel.c
/*
 * Parallelized version of pi_serial. Compare performance with time ./pi_parallel
 * and time ./pi_serial
 *
 * Author: Matt Cufari
 * Version 1.1.0
 * Date Created: Dec 18 2020
 * Date Last Modified: Jan 4 2021
 */
#include <omp.h>
#include <stdio.h>
#define NUM_THREADS 2                      //Thread count macro
static long num_steps = 1000000;           //number of rectangles
double step;                               //dx
int main(){
    int i;                                 //index of rectangle
    int nthreads;                          //number of threads actually granted
    double pi = 0;                         //value of pi
    double sum[NUM_THREADS];               //sum is now an array; the Riemann sum is the sum over sum[]
    step = 1.0/(double) num_steps;         //dx
    omp_set_num_threads(NUM_THREADS);      //Set number of threads
    #pragma omp parallel                   //Parallel block
    {
        int i;                             //index of rectangle (thread local)
        int id;                            //id of thread (thread local)
        int nthrds;                        //number of threads (thread local)
        double x;                          //Position on x-axis (thread local)
        id = omp_get_thread_num();         //Get id
        nthrds = omp_get_num_threads();    //Record the actual team size: the runtime may grant fewer threads than requested
        if(id == 0) {
            nthreads = nthrds;             //Set the global variable. See loop over sum
        }
        for(i=id, sum[id]=0.0; i<num_steps; i=i+nthrds){  //i += nthrds !!!
            x = (i+0.5)*step;              //midpoint of rectangle
            sum[id] += 4.0 / (1.0+x*x);    //Same as before
        }
    } //End parallel block
    for(i = 0, pi=0.0; i < nthreads; i++){
        pi += sum[i]*step;                 //Sum over sum[] (* dx)
    }
    printf("The value of pi is: %f\n", pi);
    return 0;
}
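/*
 * A minimal sketch (not part of the original pi_parallel.c) of the same
 * midpoint-rule integration written with an OpenMP reduction clause instead of
 * the per-thread sum[] array; the array version above can suffer false sharing
 * because neighbouring sum[id] slots share a cache line.  Assumes the same
 * num_steps/step conventions as the file above.
 */
static double pi_reduction(long num_steps)
{
    double step = 1.0 / (double) num_steps;
    double sum = 0.0;
    #pragma omp parallel for reduction(+:sum)   /* each thread keeps a private sum, combined at the end */
    for (long i = 0; i < num_steps; i++) {
        double x = (i + 0.5) * step;            /* midpoint of rectangle i */
        sum += 4.0 / (1.0 + x * x);
    }
    return sum * step;
    /* usage: printf("%f\n", pi_reduction(1000000)); */
}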
copyprivate-clause.c
/*
 * copyprivate-clause.c
 *
 * Created on: 09/04/2014
 * Author: Carlos de la Torre
 */
#include <stdio.h>
#include <omp.h>

int main() {
    int n = 9, i, b[n];
    for (i = 0; i < n; i++) b[i] = -1;
    #pragma omp parallel
    {
        int a;
        #pragma omp single copyprivate(a)
        {
            printf("\nEnter the initialization value for a: ");
            scanf("%d", &a);
            printf("\nSingle executed by thread %d\n", omp_get_thread_num());
        }
        #pragma omp for
        for (i = 0; i < n; i++) b[i] = a;
    }
    printf("After the parallel region:\n");
    for (i = 0; i < n; i++) printf("b[%d] = %d\t", i, b[i]);
    printf("\n");
    return 0;
}
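/*
 * A minimal sketch (not part of the original file) of the same broadcast done
 * without copyprivate: the value is written to a shared variable inside the
 * single construct, and the implicit barrier at the end of single guarantees
 * every thread sees it before reading.  copyprivate is mainly useful when the
 * broadcast target must remain a private (or threadprivate) variable.
 */
#include <stdio.h>
#include <omp.h>
static void broadcast_with_shared(void)
{
    int a_shared = 0;                       /* shared by all threads in the team */
    #pragma omp parallel
    {
        #pragma omp single
        {
            a_shared = 42;                  /* exactly one thread initializes it */
        }                                   /* implicit barrier: everyone waits here */
        printf("thread %d sees a = %d\n", omp_get_thread_num(), a_shared);
    }
}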
rootfinder_initial_guess_mex.c
#include <math.h>
#include <complex.h>   /* double complex, creal, cimag */
#include "mex.h"
#include "matrix.h"
#include "linequad.h"

/* The gateway function */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    // Read input
    int n = mxGetNumberOfElements(prhs[0]);
    double* tj = mxGetPr(prhs[0]);
    double* xj = mxGetPr(prhs[1]);
    double* yj = mxGetPr(prhs[2]);
    double* zj = mxGetPr(prhs[3]);
    int N = mxGetNumberOfElements(prhs[4]);
    double* x0 = mxGetPr(prhs[4]);
    double* y0 = mxGetPr(prhs[5]);
    double* z0 = mxGetPr(prhs[6]);
    // Prepare output
    plhs[0] = mxCreateDoubleMatrix(N, 1, mxCOMPLEX);
    double* tinit_re_p = mxGetPr(plhs[0]);
    double* tinit_im_p = mxGetPi(plhs[0]);
    // Call function for all inputs
    #pragma omp parallel for schedule(guided)
    for (int i=0; i<N; i++) {
        double complex tinit = rootfinder_initial_guess(tj, xj, yj, zj, n,
                                                        x0[i], y0[i], z0[i]);
        tinit_re_p[i] = creal(tinit);
        tinit_im_p[i] = cimag(tinit);
    }
}
kriging.h
/*
   Copyright 2009 HPGL Team
   This file is part of HPGL (High Performance Geostatistics Library).
   HPGL is free software: you can redistribute it and/or modify it under the terms of the BSD License.
   You should have received a copy of the BSD License along with HPGL.
*/

#ifndef __KRIGING_H__69012DAB_4BA5_44E4_9DF9_749A5519E0AD
#define __KRIGING_H__69012DAB_4BA5_44E4_9DF9_749A5519E0AD

namespace hpgl
{
	/**/
	template<
		typename values_t,
		typename defineds_t,
		typename coord_t,
		typename means_t,
		typename covariances_t,
		typename neighbourhood_lookup_t,
		typename weight_calculator_t,
		typename result_t>
	void kriging(
		std::vector<values_t*> values,
		std::vector<defineds_t*> defineds,
		const std::vector<params_t> & params,
		const reducer_t & reducer,
		const undefined_handler )
	{
		size_t size = input_property.size();
#pragma omp parallel
		{
#pragma omp for
			for (node_index_t node_idx = 0; node_idx < size; ++node_idx)
			{
				struct result_t
				{
					double value;
					bool defined;
				};
				std::vector<result_t> result;
				std::vector<indicator_probability_t> probs;
				for (int idx = 0; idx < params.m_category_count; ++idx)
				{
					indicator_probability_t prob;
					ki_result_t ki_result = kriging_interpolation(
						*params[idx].input_values,
						*params[idx].defineds,
						node_idx,
						grid[node_idx],
						*covariances[idx],
						*mps[idx],
						nblookups[idx],
						weight_calculator(sk_constraints, input_property),
						prob);
					if (ki_result != KI_SUCCESS)
					{
						result_t res = {*mps[idx][node_idx], false};
						result.push_back(res);
					}
					else
					{
						result_t res = {prob, true};
						result.push_back(res);
					}
				}
				output_property.set_at(node_idx, reducer(result));
#pragma omp critical
				{
					report.next_lap();
				}
			}
		}
		report.stop();
	}
}

#endif //__KRIGING_H__69012DAB_4BA5_44E4_9DF9_749A5519E0AD
oracle12c_fmt_plug.c
/* * This software is Copyright (c) 2015, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. * * https://www.trustwave.com/Resources/SpiderLabs-Blog/Changes-in-Oracle-Database-12c-password-hashes/ * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_oracle12c; #elif FMT_REGISTERS_H john_register_one(&fmt_oracle12c); #else #include <openssl/sha.h> #include <string.h> #include "arch.h" //#undef SIMD_COEF_64 //#undef SIMD_PARA_SHA512 //#undef _OPENMP #include "misc.h" #include "memory.h" #include "common.h" #include "formats.h" #include "johnswap.h" #include "sha2.h" #include "pbkdf2_hmac_sha512.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "Oracle12C" #define FORMAT_NAME "" #ifdef SIMD_COEF_64 #define ALGORITHM_NAME "PBKDF2-SHA512 " SHA512_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA512 32/" ARCH_BITS_STR #endif #define PLAINTEXT_LENGTH 125 // XXX #define CIPHERTEXT_LENGTH 160 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(uint32_t) #define BINARY_SIZE 64 #define BINARY_ALIGN sizeof(uint32_t) #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #ifdef SIMD_COEF_64 #define MIN_KEYS_PER_CRYPT (SIMD_COEF_64 * SIMD_PARA_SHA512) #define MAX_KEYS_PER_CRYPT (SIMD_COEF_64 * SIMD_PARA_SHA512) #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #define FORMAT_TAG "$oracle12c$" #define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG) - 1) static struct fmt_tests tests[] = { {"$oracle12c$e3243b98974159cc24fd2c9a8b30ba62e0e83b6ca2fc7c55177c3a7f82602e3bdd17ceb9b9091cf9dad672b8be961a9eac4d344bdba878edc5dcb5899f689ebd8dd1be3f67bff9813a464382381ab36b", "epsilon"}, {"$oracle12c$eda9535a516d5c7c75ef250f8b1b5fadc023ebfdad9b8d46f023b283cabc06f822e6db556a131d8f87fb427e6a7d592ca69b0e4eef22648aa7ba00afee786a8745057545117145650771143408825746", "18445407"}, {NULL} }; static struct custom_salt { int saltlen; unsigned char salt[16 + 22 + 1]; } *cur_salt; #ifdef SIMD_COEF_64 static char (*saved_key)[SHA_BUF_SIZ*sizeof(uint64_t)]; #else static char (*saved_key)[PLAINTEXT_LENGTH + 1]; #endif static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static void init(struct fmt_main *self) { #ifdef _OPENMP static int omp_t = 1; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *p = ciphertext; if (strncasecmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH)) return 0; if (strlen(ciphertext) > (FORMAT_TAG_LENGTH + CIPHERTEXT_LENGTH)) return 0; p = strrchr(ciphertext, '$'); if (!p) return 0; p = p + 1; if (strlen(p) != (BINARY_SIZE * 2 + 32)) return 0; if (!ishexlc(p)) goto error; return 1; error: return 0; } static void *get_salt(char *ciphertext) { static struct custom_salt cs; char *p; int i; memset(&cs, 0, sizeof(cs)); p = ciphertext + FORMAT_TAG_LENGTH + 2 * BINARY_SIZE; // AUTH_VFR_DATA is variable, and 16 bytes in length for (i = 0; i < 16; i++) cs.salt[i] = (atoi16[ARCH_INDEX(p[2*i])] << 4) | atoi16[ARCH_INDEX(p[2*i+1])]; strncpy((char*)cs.salt + 16, 
"AUTH_PBKDF2_SPEEDY_KEY", 22); // add constant string to the salt cs.saltlen = 16 + 22; return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; int i; char *p; p = ciphertext + FORMAT_TAG_LENGTH; for (i = 0; i < BINARY_SIZE && *p; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static int crypt_all(int *pcount, struct db_salt *salt) { int index; const int count = *pcount; #ifdef _OPENMP #pragma omp parallel for #endif #if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1 #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { SHA512_CTX ctx; int i = 0; #if SIMD_COEF_64 int lens[SSE_GROUP_SZ_SHA512]; unsigned char *pin[SSE_GROUP_SZ_SHA512]; union { uint32_t *pout[SSE_GROUP_SZ_SHA512]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA512; ++i) { lens[i] = strlen(saved_key[index+i]); pin[i] = (unsigned char*)saved_key[index+i]; x.pout[i] = (uint32_t*)(crypt_out[index+i]); } pbkdf2_sha512_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->saltlen, 4096, &(x.poutc), BINARY_SIZE, 0); #else pbkdf2_sha512((const unsigned char*)saved_key[index], strlen(saved_key[index]), cur_salt->salt, cur_salt->saltlen, 4096, (unsigned char*)crypt_out[index], BINARY_SIZE, 0); #endif #if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1 for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) #endif { SHA512_Init(&ctx); SHA512_Update(&ctx, (unsigned char*)crypt_out[index + i], BINARY_SIZE); SHA512_Update(&ctx, cur_salt->salt, 16); // AUTH_VFR_DATA first 16 bytes SHA512_Final((unsigned char*)crypt_out[index + i], &ctx); } } return count; } static int cmp_all(void *binary, int count) { int index = 0; #if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1 for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_oracle12c = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { NULL }, { FORMAT_TAG }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, 
fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
geopm_sched.c
/* * Copyright (c) 2015, 2016, 2017, 2018, Intel Corporation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY LOG OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #ifdef __APPLE__ #define _DARWIN_C_SOURCE #include <sys/types.h> #include <sys/sysctl.h> #endif #include <stdlib.h> #include <string.h> #include <stdio.h> #include <stdint.h> #include <unistd.h> #include <pthread.h> #include <errno.h> #include <string.h> #include <signal.h> #include "geopm_sched.h" #include "geopm_error.h" #include "config.h" #ifdef _OPENMP #include <omp.h> #endif static volatile unsigned g_is_popen_complete = 0; static struct sigaction g_popen_complete_signal_action; static void geopm_sched_popen_complete(int signum) { if (signum == SIGCHLD) { g_is_popen_complete = 1; } } int geopm_sched_popen(const char *cmd, FILE **fid) { int err = 0; *fid = NULL; struct sigaction save_action; g_popen_complete_signal_action.sa_handler = geopm_sched_popen_complete; sigemptyset(&g_popen_complete_signal_action.sa_mask); g_popen_complete_signal_action.sa_flags = 0; err = sigaction(SIGCHLD, &g_popen_complete_signal_action, &save_action); if (!err) { *fid = popen(cmd, "r"); while (*fid && !g_is_popen_complete) { } g_is_popen_complete = 0; sigaction(SIGCHLD, &save_action, NULL); } if (!err && *fid == NULL) { err = errno ? errno : GEOPM_ERROR_RUNTIME; } return err; } #ifndef __APPLE__ int geopm_sched_num_cpu(void) { return sysconf(_SC_NPROCESSORS_CONF); } int geopm_sched_get_cpu(void) { return sched_getcpu(); } static pthread_once_t g_proc_cpuset_once = PTHREAD_ONCE_INIT; static cpu_set_t *g_proc_cpuset = NULL; static size_t g_proc_cpuset_size = 0; /* If /proc/self/status is usable and correct then parse this file to determine the process affinity. */ #ifdef GEOPM_PROCFS int geopm_sched_proc_cpuset_helper(int num_cpu, uint32_t *proc_cpuset, FILE *fid) { const char *key = "Cpus_allowed:"; const size_t key_len = strlen(key); const int num_read = num_cpu / 32 + (num_cpu % 32 ? 
1 : 0); int err = 0; char *line = NULL; size_t line_len = 0; int read_idx = 0; while ((getline(&line, &line_len, fid)) != -1) { if (strncmp(line, key, key_len) == 0) { char *line_ptr = line + key_len; /* On some systems we have seen the mask padded with zeros beyond the number of online CPUs. Deal with this by skipping extra leading 32 bit masks */ int num_comma = 0; char *comma_ptr = line_ptr; while ((comma_ptr = strchr(comma_ptr, ','))) { ++comma_ptr; ++num_comma; } if (num_comma > num_read - 1) { num_comma -= num_read - 1; for (int i = 0; !err && i < num_comma; ++i) { line_ptr = strchr(line_ptr, ','); if (!line_ptr) { err = GEOPM_ERROR_LOGIC; } else { ++line_ptr; } } } for (read_idx = num_read - 1; !err && read_idx >= 0; --read_idx) { int num_match = sscanf(line_ptr, "%x", proc_cpuset + read_idx); if (num_match != 1) { err = GEOPM_ERROR_RUNTIME; } else { line_ptr = strchr(line_ptr, ','); if (read_idx != 0 && line_ptr == NULL) { err = GEOPM_ERROR_RUNTIME; } else { ++line_ptr; } } } } } if (line) { free(line); } if (read_idx != -1) { err = GEOPM_ERROR_RUNTIME; } return err; } static void geopm_proc_cpuset_once(void) { const char *status_path = "/proc/self/status"; const int num_cpu = geopm_sched_num_cpu(); const int num_read = num_cpu / 32 + (num_cpu % 32 ? 1 : 0); int err = 0; uint32_t *proc_cpuset = NULL; FILE *fid = NULL; g_proc_cpuset = CPU_ALLOC(num_cpu); if (g_proc_cpuset == NULL) { err = ENOMEM; } if (!err) { g_proc_cpuset_size = CPU_ALLOC_SIZE(num_cpu); proc_cpuset = calloc(num_read, sizeof(*proc_cpuset)); if (proc_cpuset == NULL) { err = ENOMEM; } } if (!err) { fid = fopen(status_path, "r"); if (!fid) { err = errno ? errno : GEOPM_ERROR_RUNTIME; } } if (!err) { err = geopm_sched_proc_cpuset_helper(num_cpu, proc_cpuset, fid); fclose(fid); } if (!err) { memcpy(g_proc_cpuset, proc_cpuset, g_proc_cpuset_size); } else if (g_proc_cpuset) { for (int i = 0; i < num_cpu; ++i) { CPU_SET_S(i, g_proc_cpuset_size, g_proc_cpuset); } } if (proc_cpuset) { free(proc_cpuset); } } /* If /proc/self/status is not available spawn a pthread requesting an open affinity mask and then have the thread query the affinity mask enforced by the OS using sched_getaffinity(). */ #else /* GEOPM_PROCFS */ static void *geopm_proc_cpuset_pthread(void *arg) { void *result = NULL; int err = sched_getaffinity(0, g_proc_cpuset_size, g_proc_cpuset); if (err) { result = (void *)(size_t)(errno ? 
errno : GEOPM_ERROR_RUNTIME); } return result; } static void geopm_proc_cpuset_once(void) { int err = 0; int num_cpu = geopm_sched_num_cpu(); pthread_t tid; pthread_attr_t attr; g_proc_cpuset = CPU_ALLOC(num_cpu); if (g_proc_cpuset == NULL) { err = ENOMEM; } if (!err) { g_proc_cpuset_size = CPU_ALLOC_SIZE(num_cpu); for (int i = 0; i < num_cpu; ++i) { CPU_SET_S(i, g_proc_cpuset_size, g_proc_cpuset); } err = pthread_attr_init(&attr); } if (!err) { err = pthread_attr_setaffinity_np(&attr, g_proc_cpuset_size, g_proc_cpuset); } if (!err) { err = pthread_create(&tid, &attr, geopm_proc_cpuset_pthread, NULL); } if (!err) { void *result = NULL; err = pthread_join(tid, &result); if (!err && result) { err = (int)(size_t)result; } } if (err && err != ENOMEM) { for (int i = 0; i < num_cpu; ++i) { CPU_SET_S(i, g_proc_cpuset_size, g_proc_cpuset); } } if (!err) { err = pthread_attr_destroy(&attr); } } #endif /* GEOPM_PROCFS */ int geopm_sched_proc_cpuset(int num_cpu, cpu_set_t *proc_cpuset) { int err = pthread_once(&g_proc_cpuset_once, geopm_proc_cpuset_once); int sched_num_cpu = geopm_sched_num_cpu(); size_t cpuset_size = CPU_ALLOC_SIZE(num_cpu); if (!err && cpuset_size < g_proc_cpuset_size) { err = GEOPM_ERROR_INVALID; } if (!err) { memcpy(proc_cpuset, g_proc_cpuset, g_proc_cpuset_size); for (int i = sched_num_cpu; i < num_cpu; ++i) { CPU_CLR_S(i, cpuset_size, proc_cpuset); } } return err; } int geopm_sched_woomp(int num_cpu, cpu_set_t *woomp) { /*! @brief Function that returns a cpuset that has bits set for all CPUs enabled for the process which are not used by OpenMP. Rather than returning an empty mask, if all CPUs allocated for the process are used by OpenMP, then the woomp mask will have all bits set. */ int err = pthread_once(&g_proc_cpuset_once, geopm_proc_cpuset_once); int sched_num_cpu = geopm_sched_num_cpu(); size_t req_alloc_size = CPU_ALLOC_SIZE(num_cpu); if (!err && !g_proc_cpuset) { err = ENOMEM; } if (!err && req_alloc_size < g_proc_cpuset_size) { err = EINVAL; } if (!err) { /* Copy the process CPU mask into the output. */ memcpy(woomp, g_proc_cpuset, g_proc_cpuset_size); /* Start an OpenMP parallel region and have each thread clear its bit from the mask. */ #ifdef _OPENMP #pragma omp parallel default(shared) { #pragma omp critical { int cpu_index = sched_getcpu(); if (cpu_index != -1 && cpu_index < num_cpu) { /* Clear the bit for this OpenMP thread's CPU. */ CPU_CLR_S(cpu_index, g_proc_cpuset_size, woomp); } else { err = errno ? errno : GEOPM_ERROR_LOGIC; } } /* end pragma omp critical */ } /* end pragma omp parallel */ #endif /* _OPENMP */ } if (!err) { for (int i = sched_num_cpu; i < num_cpu; ++i) { CPU_CLR_S(i, req_alloc_size, woomp); } } if (err || CPU_COUNT_S(g_proc_cpuset_size, woomp) == 0) { /* If all CPUs are used by the OpenMP gang, then leave the mask open and allow the Linux scheduler to choose. */ for (int i = 0; i < num_cpu; ++i) { CPU_SET_S(i, g_proc_cpuset_size, woomp); } } return err; } #else /* __APPLE__ */ void __cpuid(uint32_t*, int); int geopm_sched_num_cpu(void) { uint32_t result = 1; size_t len = sizeof(result); sysctl((int[2]) {CTL_HW, HW_NCPU}, 2, &result, &len, NULL, 0); return result; } int geopm_sched_get_cpu(void) { int result = -1; uint32_t cpu_info[4]; __cpuid(cpu_info, 1); // Check APIC if (cpu_info[3] & (1 << 9)) { result = (int)(cpu_info[1] >> 24); } return result; } // On Mac OS just fill in all bits for the cpuset for both the process // mask and woomp to get the tests passing. 
static void geopm_cpuset_fill(int num_cpu, cpu_set_t *proc_cpuset) { size_t cpuset_size = CPU_ALLOC_SIZE(num_cpu); for (int i = 0; i < num_cpu; ++i) { CPU_SET_S(i, cpuset_size, proc_cpuset); } } int geopm_sched_proc_cpuset(int num_cpu, cpu_set_t *proc_cpuset) { geopm_cpuset_fill(num_cpu, proc_cpuset); return 0; } int geopm_sched_woomp(int num_cpu, cpu_set_t *woomp) { geopm_cpuset_fill(num_cpu, woomp); return 0; } #endif /* __APPLE__ */
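/*
 * A minimal usage sketch (not part of geopm_sched.c; assumes a Linux build with
 * _GNU_SOURCE and the geopm_sched.h header that this file already includes):
 * query the CPUs of this process that are left over once the OpenMP threads
 * have claimed theirs, via the geopm_sched_woomp() interface above.
 */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE         /* CPU_ALLOC / CPU_COUNT_S are GNU extensions */
#endif
#include <sched.h>
#include <stdio.h>
#include "geopm_sched.h"    /* geopm_sched_num_cpu(), geopm_sched_woomp() */

static void print_woomp_sketch(void)
{
    int num_cpu = geopm_sched_num_cpu();
    cpu_set_t *woomp = CPU_ALLOC(num_cpu);
    if (woomp != NULL) {
        if (geopm_sched_woomp(num_cpu, woomp) == 0) {
            printf("CPUs not used by OpenMP: %d of %d\n",
                   CPU_COUNT_S(CPU_ALLOC_SIZE(num_cpu), woomp), num_cpu);
        }
        CPU_FREE(woomp);
    }
}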
rt_dsyssq.c
#include "runtime.h"

void RT_CORE_dsyssq_f1( Quark *quark, Quark_Task_Flags *task_flags,
                        PLASMA_enum uplo, int n, const double *A, int lda,
                        double *scale, double *sumsq,
                        double *fake, int szeF, int paramF )
{
    plasma_context_t *plasma;
    plasma = plasma_context_self();
    if ( plasma->runtime == PLASMA_QUARK ) {
        QUARK_CORE_dsyssq_f1(quark, task_flags, uplo, n, A, lda,
                             scale, sumsq, fake, szeF, paramF);
    }
    else if ( plasma->runtime == PLASMA_OMPSS) {
        if ( (fake == scale) && (paramF & GATHERV) ) {
            #pragma omp target device (smp) copy_deps
            #pragma omp task in([lda*n]A) inout([1]scale, [1]sumsq) label(dsyssq)
            CORE_dsyssq(uplo, n, A, lda, scale, sumsq);
        }
        else {
            #pragma omp target device (smp) copy_deps
            #pragma omp task in([lda*n]A) inout([1]scale, [1]sumsq) concurrent([szeF]fake) label(dsyssq)
            CORE_dsyssq(uplo, n, A, lda, scale, sumsq);
        }
    }
}
Fig_10.4_loopCollapse.c
// sample compile command: "gcc -fopenmp -c Fig_10.4_loopCollapse.c" to generate *.o object file
#include <omp.h>

// apply a function (*MFUNC) to each element of an N by M array
void Apply(int N, int M, float* A, void(*MFUNC)(int, int, float*))
{
    #pragma omp parallel for num_threads(4) collapse(2) if(N*M>100)
    for (int i = 0; i < N; i++)
        for (int j = 0; j < M; j++)
            MFUNC(i, j, (A+i*M+j));
}
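/*
 * A minimal usage sketch (not part of Fig_10.4_loopCollapse.c): a trivial
 * element function passed to Apply(), which the collapse(2) loop above calls
 * once per (i, j) pair; the if(N*M>100) clause means this call runs in
 * parallel only because N*M = 200 exceeds the threshold.
 */
#include <stdio.h>
void Apply(int N, int M, float* A, void(*MFUNC)(int, int, float*));

static void set_to_row_plus_col(int i, int j, float* aij) { *aij = (float)(i + j); }

static void apply_usage_example(void)
{
    enum { N = 20, M = 10 };
    float A[N * M];
    Apply(N, M, A, set_to_row_plus_col);
    printf("A[3][4] = %f\n", A[3 * M + 4]);   /* expect 7.0 */
}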
lis_vector.c
/* Copyright (C) 2002-2012 The SSI Project. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H #include "lis_config.h" #else #ifdef HAVE_CONFIG_WIN32_H #include "lis_config_win32.h" #endif #endif #include <stdio.h> #include <stdlib.h> #ifdef HAVE_MALLOC_H #include <malloc.h> #endif #include <string.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef USE_MPI #include <mpi.h> #endif #include "lislib.h" /************************************************ * lis_vector_init * lis_vector_create * lis_vector_duplicate * lis_vector_destroy * lis_vector_set_value * lis_vector_set_values * lis_vector_set_values2 * lis_vector_get_value * lis_vector_get_values * lis_vector_get_range * lis_vector_get_size * lis_vector_scatter * lis_vector_gather ************************************************/ #undef __FUNC__ #define __FUNC__ "lis_vector_init" LIS_INT lis_vector_init(LIS_VECTOR *vec) { LIS_DEBUG_FUNC_IN; memset(*vec,0,sizeof(struct LIS_VECTOR_STRUCT)); (*vec)->status = LIS_VECTOR_NULL; (*vec)->is_destroy = LIS_TRUE; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_check" LIS_INT lis_vector_check(LIS_VECTOR v, LIS_INT level) { LIS_DEBUG_FUNC_IN; switch( level ) { case LIS_VECTOR_CHECK_NULL: if( !lis_is_malloc(v) ) { LIS_SETERR(LIS_ERR_ILL_ARG,"vector v is undefined\n"); return LIS_ERR_ILL_ARG; } break; default: if( !lis_is_malloc(v) ) { LIS_SETERR(LIS_ERR_ILL_ARG,"vector v is undefined\n"); return LIS_ERR_ILL_ARG; } if( v->status<=LIS_VECTOR_ASSEMBLING ) { LIS_SETERR(LIS_ERR_ILL_ARG,"vector v is assembling\n"); return LIS_ERR_ILL_ARG; } break; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_create" LIS_INT lis_vector_create(LIS_Comm comm, LIS_VECTOR *vec) { LIS_INT err; LIS_DEBUG_FUNC_IN; err = lis_vector_createex(LIS_PRECISION_DEFAULT,comm,vec); if( err ) return err; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_createex" LIS_INT lis_vector_createex(LIS_INT precision, LIS_Comm comm, LIS_VECTOR *vec) { LIS_DEBUG_FUNC_IN; *vec = NULL; *vec = (LIS_VECTOR)lis_malloc( 
sizeof(struct LIS_VECTOR_STRUCT),"lis_vector_createex::vec" ); if( NULL==*vec ) { LIS_SETERR_MEM(sizeof(struct LIS_VECTOR_STRUCT)); return LIS_OUT_OF_MEMORY; } lis_vector_init(vec); (*vec)->status = LIS_VECTOR_NULL; (*vec)->precision = precision; (*vec)->comm = comm; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_set_size" LIS_INT lis_vector_set_size(LIS_VECTOR vec, LIS_INT local_n, LIS_INT global_n) { LIS_INT nprocs,my_rank; LIS_INT is,ie; LIS_INT i,err,precision; LIS_INT *ranges; LIS_DEBUG_FUNC_IN; if( global_n>0 && local_n>global_n ) { LIS_SETERR2(LIS_ERR_ILL_ARG,"local n(=%d) is larger than global n(=%d)\n",local_n,global_n); return LIS_ERR_ILL_ARG; } if( local_n<0 || global_n<0 ) { LIS_SETERR2(LIS_ERR_ILL_ARG,"local n(=%d) or global n(=%d) are less than 0\n",local_n,global_n); return LIS_ERR_ILL_ARG; } if( local_n==0 && global_n==0 ) { LIS_SETERR2(LIS_ERR_ILL_ARG,"local n(=%d) and global n(=%d) are 0\n",local_n,global_n); return LIS_ERR_ILL_ARG; } err = lis_ranges_create(vec->comm,&local_n,&global_n,&ranges,&is,&ie,&nprocs,&my_rank); if( err ) { return err; } vec->ranges = ranges; precision = vec->precision; if( !precision ) { vec->value = (LIS_SCALAR *)lis_malloc( local_n*sizeof(LIS_SCALAR),"lis_vector_set_size::vec->value" ); if( NULL==vec->value ) { LIS_SETERR_MEM(local_n*sizeof(LIS_SCALAR)); return LIS_OUT_OF_MEMORY; } #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0;i<local_n;i++) { vec->value[i] = 0.0; } } else { vec->value = (LIS_SCALAR *)lis_malloc( (2*local_n + local_n%2)*sizeof(LIS_SCALAR),"lis_vector_set_size::vec->value" ); if( NULL==vec->value ) { LIS_SETERR_MEM((2*local_n+local_n%2)*sizeof(LIS_SCALAR)); return LIS_OUT_OF_MEMORY; } vec->value_lo = vec->value + local_n + local_n%2; vec->work = (LIS_SCALAR *)lis_malloc( 32*sizeof(LIS_SCALAR),"lis_vector_set_size::vec->work" ); if( NULL==vec->work ) { LIS_SETERR_MEM(32*sizeof(LIS_SCALAR)); return LIS_OUT_OF_MEMORY; } #ifdef USE_VEC_COMP #pragma cdir nodep #endif #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0;i<local_n;i++) { vec->value[i] = 0.0; vec->value_lo[i] = 0.0; } } vec->is_copy = LIS_TRUE; vec->status = LIS_VECTOR_ASSEMBLED; vec->n = local_n; vec->gn = global_n; vec->np = local_n; vec->my_rank = my_rank; vec->nprocs = nprocs; vec->is = is; vec->ie = ie; vec->origin = LIS_ORIGIN_0; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_reuse" LIS_INT lis_vector_reuse(LIS_VECTOR *vec) { LIS_INT err,np,precision; LIS_DEBUG_FUNC_IN; err = lis_vector_check(*vec,LIS_VECTOR_CHECK_NULL); if( err ) return err; np = (*vec)->np; if( (*vec)->status==LIS_VECTOR_NULL ) { precision = ((LIS_VECTOR)*vec)->precision; if( !precision ) { (*vec)->value = (LIS_SCALAR *)lis_malloc( np*sizeof(LIS_SCALAR),"lis_vector_reuse::vec->value" ); if( NULL==(*vec)->value ) { LIS_SETERR_MEM(np*sizeof(LIS_SCALAR)); return LIS_OUT_OF_MEMORY; } (*vec)->is_copy = LIS_TRUE; } else { (*vec)->value = (LIS_SCALAR *)lis_malloc( (2*np+np%2)*sizeof(LIS_SCALAR),"lis_vector_reuse::vec->value" ); if( NULL==(*vec)->value ) { LIS_SETERR_MEM((2*np+np%2)*sizeof(LIS_SCALAR)); return LIS_OUT_OF_MEMORY; } (*vec)->is_copy = LIS_TRUE; (*vec)->value_lo = (*vec)->value + np + np%2; (*vec)->work = (LIS_SCALAR *)lis_malloc( 32*sizeof(LIS_SCALAR),"lis_vector_reuse::vec->work" ); if( NULL==(*vec)->work ) { LIS_SETERR_MEM(32*sizeof(LIS_SCALAR)); lis_vector_destroy(*vec); *vec = NULL; return LIS_OUT_OF_MEMORY; } } } (*vec)->status = LIS_VECTOR_ASSEMBLED; LIS_DEBUG_FUNC_OUT; 
return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_unset" LIS_INT lis_vector_unset(LIS_VECTOR vec) { LIS_INT err; LIS_DEBUG_FUNC_IN; err = lis_vector_check(vec,LIS_VECTOR_CHECK_NULL); if( err ) return err; if( vec->is_copy ) lis_free(vec->value); vec->value = NULL; vec->status = LIS_VECTOR_NULL; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_set" LIS_INT lis_vector_set(LIS_VECTOR vec, LIS_SCALAR *value) { LIS_INT err; LIS_INT n,np; LIS_DEBUG_FUNC_IN; err = lis_vector_check(vec,LIS_VECTOR_CHECK_NULL); if( err ) return err; n = vec->n; np = vec->np; if( vec->is_destroy ) lis_free(vec->value); vec->value = value; vec->is_copy = LIS_FALSE; vec->status = LIS_VECTOR_ASSEMBLING; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_destroy" LIS_INT lis_vector_destroy(LIS_VECTOR vec) { LIS_DEBUG_FUNC_IN; if( lis_is_malloc(vec) ) { if( vec->value && vec->is_destroy ) lis_free( vec->value ); if( vec->work ) lis_free( vec->work ); if( vec->ranges ) lis_free( vec->ranges ); if( vec ) lis_free(vec); } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_duplicate" LIS_INT lis_vector_duplicate(void *vin, LIS_VECTOR *vout) { LIS_INT precision,err; LIS_DEBUG_FUNC_IN; precision = LIS_PRECISION_DEFAULT; if( ((LIS_VECTOR)vin)->label==LIS_LABEL_VECTOR) { precision = ((LIS_VECTOR)vin)->precision; } else if( ((LIS_VECTOR)vin)->label!=LIS_LABEL_MATRIX) { LIS_SETERR(LIS_ERR_ILL_ARG, "First argument is not LIS_VECTOR or LIS_MATRIX\n"); return LIS_ERR_ILL_ARG; } err = lis_vector_duplicateex(precision,vin,vout); LIS_DEBUG_FUNC_OUT; return err; } #undef __FUNC__ #define __FUNC__ "lis_vector_duplicateex" LIS_INT lis_vector_duplicateex(LIS_INT precision, void *A, LIS_VECTOR *vout) { LIS_INT n,np,pad; LIS_INT nprocs; LIS_INT i; #ifdef USE_MPI LIS_INT *ranges; #endif LIS_SCALAR *value; LIS_DEBUG_FUNC_IN; if( ((LIS_VECTOR)A)->label!=LIS_LABEL_VECTOR && ((LIS_VECTOR)A)->label!=LIS_LABEL_MATRIX) { LIS_SETERR(LIS_ERR_ILL_ARG, "Second argument is not LIS_VECTOR or LIS_MATRIX\n"); return LIS_ERR_ILL_ARG; } nprocs = ((LIS_VECTOR)A)->nprocs; n = ((LIS_VECTOR)A)->n; np = ((LIS_VECTOR)A)->np; pad = ((LIS_VECTOR)A)->pad; *vout = NULL; *vout = (LIS_VECTOR)lis_malloc( sizeof(struct LIS_VECTOR_STRUCT),"lis_vector_duplicateex::vout" ); if( NULL==*vout ) { LIS_SETERR_MEM(sizeof(struct LIS_VECTOR_STRUCT)); return LIS_OUT_OF_MEMORY; } lis_vector_init(vout); if( !precision ) { value = (LIS_SCALAR *)lis_malloc( (np+pad)*sizeof(LIS_SCALAR),"lis_vector_duplicateex::value" ); if( NULL==value ) { LIS_SETERR_MEM((np+pad)*sizeof(LIS_SCALAR)); lis_vector_destroy(*vout); *vout = NULL; return LIS_OUT_OF_MEMORY; } (*vout)->value = value; #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0;i<np+pad;i++) { (*vout)->value[i] = 0.0; } } else { value = (LIS_SCALAR *)lis_malloc( (2*(np+pad) + (np+pad)%2)*sizeof(LIS_SCALAR),"lis_vector_duplicateex::value" ); if( NULL==value ) { LIS_SETERR_MEM((2*(np+pad) + (np+pad)%2)*sizeof(LIS_SCALAR)); lis_vector_destroy(*vout); *vout = NULL; return LIS_OUT_OF_MEMORY; } (*vout)->value = value; (*vout)->value_lo = value + np+pad + (np+pad)%2; (*vout)->work = (LIS_SCALAR *)lis_malloc( 32*sizeof(LIS_SCALAR),"lis_vector_duplicateex::vout->work" ); if( NULL==(*vout)->work ) { LIS_SETERR_MEM(32*sizeof(LIS_SCALAR)); lis_vector_destroy(*vout); *vout = NULL; return LIS_OUT_OF_MEMORY; } #ifdef USE_VEC_COMP #pragma cdir nodep #endif #ifdef _OPENMP #pragma omp parallel for private(i) #endif 
for(i=0;i<np+pad;i++) { (*vout)->value[i] = 0.0; (*vout)->value_lo[i] = 0.0; } } #ifdef USE_MPI ranges = (LIS_INT *)lis_malloc( (nprocs+1)*sizeof(LIS_INT),"lis_vector_duplicateex::ranges" ); if( ranges==NULL ) { LIS_SETERR_MEM((nprocs+1)*sizeof(LIS_INT)); lis_vector_destroy(*vout); *vout = NULL; return LIS_OUT_OF_MEMORY; } for(i=0;i<nprocs+1;i++) ranges[i] = ((LIS_VECTOR)A)->ranges[i]; (*vout)->ranges = ranges; #else (*vout)->ranges = NULL; #endif (*vout)->is_copy = LIS_TRUE; (*vout)->status = LIS_VECTOR_ASSEMBLED; (*vout)->precision = precision; (*vout)->n = ((LIS_VECTOR)A)->n; (*vout)->gn = ((LIS_VECTOR)A)->gn; (*vout)->np = ((LIS_VECTOR)A)->np; (*vout)->pad = ((LIS_VECTOR)A)->pad; (*vout)->comm = ((LIS_VECTOR)A)->comm; (*vout)->my_rank = ((LIS_VECTOR)A)->my_rank; (*vout)->nprocs = ((LIS_VECTOR)A)->nprocs; (*vout)->is = ((LIS_VECTOR)A)->is; (*vout)->ie = ((LIS_VECTOR)A)->ie; (*vout)->origin = ((LIS_VECTOR)A)->origin; (*vout)->is_destroy = ((LIS_VECTOR)A)->is_destroy; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_set_value0" LIS_INT lis_vector_set_value0(LIS_INT flag, LIS_INT i, LIS_SCALAR value, LIS_VECTOR v) { LIS_INT n,np,gn,is,ie; LIS_DEBUG_FUNC_IN; np = v->np; n = v->n; gn = v->gn; is = v->is; ie = v->ie; if( v->origin ) i--; if( i<is || i>=ie ) { if( v->origin ) { is++; ie++; i++; } LIS_SETERR3(LIS_ERR_ILL_ARG, "i(=%d) is less than %d or larger than %d\n",i,is,ie); return LIS_ERR_ILL_ARG; } if(v->status==LIS_VECTOR_NULL) { v->value = (LIS_SCALAR *)lis_malloc( np*sizeof(LIS_SCALAR),"lis_vector_set_value::v->value" ); if( NULL==v->value ) { LIS_SETERR_MEM(np*sizeof(LIS_SCALAR)); return LIS_OUT_OF_MEMORY; } v->is_copy = LIS_TRUE; v->status = LIS_VECTOR_ASSEMBLING; } if(flag==LIS_INS_VALUE) { v->value[i-is] = value; } else { v->value[i-is] += value; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_set_value" LIS_INT lis_vector_set_value(LIS_INT flag, LIS_INT i, LIS_SCALAR value, LIS_VECTOR v) { LIS_INT n,np,gn,is,ie; LIS_DEBUG_FUNC_IN; np = v->np; n = v->n; gn = v->gn; is = v->is; ie = v->ie; if( v->origin ) i--; if( i<is || i>=ie ) { if( v->origin ) { is++; ie++; i++; } LIS_SETERR3(LIS_ERR_ILL_ARG, "i(=%d) is less than %d or larger than %d\n",i,is,ie); return LIS_ERR_ILL_ARG; } if(v->status==LIS_VECTOR_NULL) { v->value = (LIS_SCALAR *)lis_malloc( np*sizeof(LIS_SCALAR),"lis_vector_set_value::v->value" ); if( NULL==v->value ) { LIS_SETERR_MEM(np*sizeof(LIS_SCALAR)); return LIS_OUT_OF_MEMORY; } v->is_copy = LIS_TRUE; v->status = LIS_VECTOR_ASSEMBLING; } if(flag==LIS_INS_VALUE) { v->value[i-is] = value; } else { v->value[i-is] += value; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_set_values" LIS_INT lis_vector_set_values(LIS_INT flag, LIS_INT count, LIS_INT index[], LIS_SCALAR value[], LIS_VECTOR v) { LIS_INT n,np,gn,i,ii,is,ie; LIS_DEBUG_FUNC_IN; np = v->np; n = v->n; gn = v->gn; is = v->is; ie = v->ie; if(v->status==LIS_VECTOR_NULL) { v->value = (LIS_SCALAR *)lis_malloc( np*sizeof(LIS_SCALAR),"lis_vector_set_values::v->value" ); if( NULL==v->value ) { LIS_SETERR_MEM(np*sizeof(LIS_SCALAR)); return LIS_OUT_OF_MEMORY; } v->is_copy = LIS_TRUE; v->status = LIS_VECTOR_ASSEMBLING; } if(flag==LIS_INS_VALUE) { for(i=0;i<count;i++) { ii = index[i]; if( v->origin ) ii--; if( ii<is || ii>=ie ) { if( v->origin ) { is++; ie++; ii++; i++; } LIS_SETERR4(LIS_ERR_ILL_ARG, "index[%d](=%d) is less than %d or larger than %d\n",i,ii,is,ie); return LIS_ERR_ILL_ARG; } v->value[ii-is] = 
value[i]; } } else { for(i=0;i<count;i++) { ii = index[i]; if( v->origin ) ii++; if( ii<is || ii>=ie ) { if( v->origin ) { is++; ie++; ii++; i++; } LIS_SETERR4(LIS_ERR_ILL_ARG, "index[%d](=%d) is less than %d or larger than %d\n",i,ii,is,ie); return LIS_ERR_ILL_ARG; } v->value[ii-is] += value[i]; } } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_set_values" LIS_INT lis_vector_set_values2(LIS_INT flag, LIS_INT start, LIS_INT count, LIS_SCALAR value[], LIS_VECTOR v) { LIS_INT n,np,gn,i,is,ie; LIS_DEBUG_FUNC_IN; np = v->np; n = v->n; gn = v->gn; is = v->is; ie = v->ie; if(v->status==LIS_VECTOR_NULL) { v->value = (LIS_SCALAR *)lis_malloc( np*sizeof(LIS_SCALAR),"lis_vector_set_values::v->value" ); if( NULL==v->value ) { LIS_SETERR_MEM(np*sizeof(LIS_SCALAR)); return LIS_OUT_OF_MEMORY; } v->is_copy = LIS_TRUE; v->status = LIS_VECTOR_ASSEMBLING; } if(flag==LIS_INS_VALUE) { for(i=0;i<count;i++) { start = i; if( v->origin ) start--; if( start<is || start>=ie ) { if( v->origin ) { is++; ie++; start++; i++; } LIS_SETERR3(LIS_ERR_ILL_ARG, "%d is less than %d or larger than %d\n",start,is,ie); return LIS_ERR_ILL_ARG; } v->value[start-is] = value[i]; } } else { for(i=0;i<count;i++) { start = i; if( v->origin ) start++; if( start<is || start>=ie ) { if( v->origin ) { is++; ie++; start++; i++; } LIS_SETERR3(LIS_ERR_ILL_ARG, "%d is less than %d or larger than %d\n",start,is,ie); return LIS_ERR_ILL_ARG; } v->value[start-is] += value[i]; } } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_get_size" LIS_INT lis_vector_get_size(LIS_VECTOR v, LIS_INT *local_n, LIS_INT *global_n) { LIS_INT err; LIS_DEBUG_FUNC_IN; err = lis_vector_check(v,LIS_VECTOR_CHECK_NULL); if( err ) return err; *local_n = v->n; *global_n = v->gn; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_get_range" LIS_INT lis_vector_get_range(LIS_VECTOR v, LIS_INT *is, LIS_INT *ie) { LIS_INT err; LIS_DEBUG_FUNC_IN; err = lis_vector_check(v,LIS_VECTOR_CHECK_NULL); if( err ) return err; if( v->origin ) { *is = v->is + 1; *ie = v->ie + 1; } else { *is = v->is; *ie = v->ie; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_get_value" LIS_INT lis_vector_get_value(LIS_VECTOR v, LIS_INT i, LIS_SCALAR *value) { LIS_INT err,n,gn,is,ie; LIS_DEBUG_FUNC_IN; err = lis_vector_check(v,LIS_VECTOR_CHECK_NULL); if( err ) return err; n = v->n; gn = v->gn; is = v->is; ie = v->ie; if( v->origin ) i--; if( i<is || i>=ie ) { if( v->origin ) { i++; is++; ie++; } LIS_SETERR3(LIS_ERR_ILL_ARG, "i(=%d) is less than %d or larger than %d\n",i,is,ie); return LIS_ERR_ILL_ARG; } *value = v->value[i-is]; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_get_values" LIS_INT lis_vector_get_values(LIS_VECTOR v, LIS_INT start, LIS_INT count, LIS_SCALAR value[]) { LIS_INT err,n,gn,i,is,ie; LIS_DEBUG_FUNC_IN; err = lis_vector_check(v,LIS_VECTOR_CHECK_NULL); if( err ) return err; n = v->n; gn = v->gn; is = v->is; ie = v->ie; if( v->origin ) start--; if( start<is || start>=ie ) { if( v->origin ) { start++; is++; ie++; } LIS_SETERR3(LIS_ERR_ILL_ARG, "start(=%d) is less than %d or larger than %d\n",start,is,ie); return LIS_ERR_ILL_ARG; } if( (start-is+count)>n ) { LIS_SETERR3(LIS_ERR_ILL_ARG, "start(=%d) + count(=%d) exceeds the range of vector v(=%d).\n",start,count,ie); return LIS_ERR_ILL_ARG; } for(i=0;i<count;i++) { value[i] = v->value[start-is + i]; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } /* 
#undef __FUNC__ #define __FUNC__ "lis_vector_set_destroyflag" LIS_INT lis_vector_set_destroyflag(LIS_VECTOR v, LIS_INT flag) { LIS_INT err; LIS_DEBUG_FUNC_IN; err = lis_vector_check(v,LIS_VECTOR_CHECK_NULL); if( err ) return err; if( flag ) { v->is_destroy = LIS_TRUE; } else { v->is_destroy = LIS_FALSE; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } #undef __FUNC__ #define __FUNC__ "lis_vector_get_destroyflag" LIS_INT lis_vector_get_destroyflag(LIS_VECTOR v, LIS_INT *flag) { LIS_INT err; LIS_DEBUG_FUNC_IN; err = lis_vector_check(v,LIS_VECTOR_CHECK_NULL); if( err ) return err; *flag = v->is_destroy; LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; } */ LIS_INT lis_vector_print(LIS_VECTOR x) { #ifdef USE_MPI LIS_INT err,i,ii,is,n,k,nprocs,my_rank; err = lis_vector_check(x,LIS_VECTOR_CHECK_NULL); if( err ) return err; nprocs = x->nprocs; my_rank = x->my_rank; n = x->n; is = x->is; for(k=0;k<nprocs;k++) { if( k==my_rank ) { for(i=0;i<n;i++) { ii = i+is; if( x->origin ) ii++; printf("%6d %e\n",ii,x->value[i]); } } MPI_Barrier(x->comm); } return LIS_SUCCESS; #else LIS_INT err,i,ii,n; err = lis_vector_check(x,LIS_VECTOR_CHECK_NULL); if( err ) return err; n = x->n; for(i=0; i<n; i++) { ii = i; if( x->origin ) ii++; if( x->precision==LIS_PRECISION_DEFAULT ) { printf("%6d %e\n",ii,x->value[i]); } else { printf("%6d %e,%e\n",ii,x->value[i],x->value_lo[i]); } } return LIS_SUCCESS; #endif } #undef __FUNC__ #define __FUNC__ "lis_vector_scatter" LIS_INT lis_vector_scatter(LIS_SCALAR value[], LIS_VECTOR v) { #ifdef USE_MPI LIS_INT err,i,is,n,my_rank,nprocs,*sendcounts; err = lis_vector_check(v,LIS_VECTOR_CHECK_NULL); if( err ) return err; my_rank = v->my_rank; nprocs = v->nprocs; n = v->n; is = v->is; sendcounts = (LIS_INT *)lis_malloc( (nprocs+1)*sizeof(LIS_INT),"lis_vector_scatter::sendcounts" ); for(i=0; i<nprocs; i++) { sendcounts[i] = v->ranges[i+1] - v->ranges[i]; } MPI_Scatterv(&value[0],sendcounts,v->ranges,MPI_DOUBLE,&value[is],n,MPI_DOUBLE,0,v->comm); #ifdef USE_VEC_COMP #pragma cdir nodep #endif #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { v->value[i] = value[i+is]; } return LIS_SUCCESS; #else LIS_INT err,i,n; err = lis_vector_check(v,LIS_VECTOR_CHECK_NULL); if( err ) return err; n = v->n; #ifdef USE_VEC_COMP #pragma cdir nodep #endif #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { v->value[i] = value[i]; } return LIS_SUCCESS; #endif } #undef __FUNC__ #define __FUNC__ "lis_vector_gather" LIS_INT lis_vector_gather(LIS_VECTOR v, LIS_SCALAR value[]) { #ifdef USE_MPI LIS_INT err,i,j,is,n,my_rank,nprocs,*recvcounts; err = lis_vector_check(v,LIS_VECTOR_CHECK_NULL); if( err ) return err; my_rank = v->my_rank; nprocs = v->nprocs; n = v->n; is = v->is; #ifdef USE_VEC_COMP #pragma cdir nodep #endif #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { value[i+is] = v->value[i]; } recvcounts = (LIS_INT *)lis_malloc( (nprocs+1)*sizeof(LIS_INT),"lis_vector_gather::recvcounts" ); for(i=0; i<nprocs; i++) { recvcounts[i] = v->ranges[i+1] - v->ranges[i]; } MPI_Allgatherv(&value[is],n,MPI_DOUBLE,&value[0],recvcounts,v->ranges,MPI_DOUBLE,v->comm); return LIS_SUCCESS; #else LIS_INT err,i,n; err = lis_vector_check(v,LIS_VECTOR_CHECK_NULL); if( err ) return err; n = v->n; #ifdef USE_VEC_COMP #pragma cdir nodep #endif #ifdef _OPENMP #pragma omp parallel for private(i) #endif for(i=0; i<n; i++) { value[i] = v->value[i]; } return LIS_SUCCESS; #endif } #undef __FUNC__ #define __FUNC__ "lis_vector_is_null" LIS_INT lis_vector_is_null(LIS_VECTOR 
v)
{
	LIS_DEBUG_FUNC_IN;

	if( v->status==LIS_VECTOR_NULL )
	{
		LIS_DEBUG_FUNC_OUT;
		return LIS_TRUE;
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_FALSE;
}
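The routines above cover the full lifecycle of a LIS vector (creation, sizing, element access, print, destruction). For orientation, a minimal driver that exercises this API might look like the sketch below; it is a sketch only, assuming the public lis.h header, LIS_COMM_WORLD, and the usual lis_initialize/lis_finalize and lis_vector_create entry points, none of which are defined in this file.

/* Minimal usage sketch for the vector routines above (assumes lis.h,
 * LIS_COMM_WORLD, lis_initialize/lis_finalize from the public LIS API). */
#include "lis.h"

int main(int argc, char *argv[])
{
	LIS_VECTOR v;
	LIS_INT    i, n = 8;

	lis_initialize(&argc, &argv);

	lis_vector_create(LIS_COMM_WORLD, &v);   /* allocate the handle            */
	lis_vector_set_size(v, 0, n);            /* let LIS choose the local size  */
	for (i = 0; i < n; i++)
	{
		lis_vector_set_value(LIS_INS_VALUE, i, (LIS_SCALAR)i, v);
	}
	lis_vector_print(v);                     /* one "index value" line per entry */
	lis_vector_destroy(v);

	lis_finalize();
	return 0;
}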
GB_binop__lt_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lt_int8) // A.*B function (eWiseMult): GB (_AemultB_08__lt_int8) // A.*B function (eWiseMult): GB (_AemultB_02__lt_int8) // A.*B function (eWiseMult): GB (_AemultB_04__lt_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_int8) // A*D function (colscale): GB (_AxD__lt_int8) // D*A function (rowscale): GB (_DxB__lt_int8) // C+=B function (dense accum): GB (_Cdense_accumB__lt_int8) // C+=b function (dense accum): GB (_Cdense_accumb__lt_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_int8) // C=scalar+B GB (_bind1st__lt_int8) // C=scalar+B' GB (_bind1st_tran__lt_int8) // C=A+scalar GB (_bind2nd__lt_int8) // C=A'+scalar GB (_bind2nd_tran__lt_int8) // C type: bool // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 0 // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_INT8 || GxB_NO_LT_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__lt_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lt_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lt_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lt_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lt_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lt_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; beta_scalar = 
(*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__lt_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lt_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__lt_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lt_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lt_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lt_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__lt_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses 
GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__lt_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
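The kernels in this file are never called directly by user code; they are reached through the generic GraphBLAS entry points when the LT operator meets INT8 inputs. The sketch below shows the user-level call that would dispatch into the eWiseMult kernels above. It assumes only the standard GraphBLAS.h API (GrB_init, GrB_Matrix_new, GrB_LT_INT8, GrB_Matrix_eWiseMult_BinaryOp); none of those symbols are defined in this file.

/* Sketch only: standard GraphBLAS C API.  C = (A < B) elementwise on the
 * intersection pattern, which dispatches to the LT / INT8 kernels above. */
#include <GraphBLAS.h>

GrB_Info lt_int8_demo (void)
{
    GrB_Matrix A, B, C ;
    GrB_init (GrB_NONBLOCKING) ;

    GrB_Matrix_new (&A, GrB_INT8, 4, 4) ;
    GrB_Matrix_new (&B, GrB_INT8, 4, 4) ;
    GrB_Matrix_new (&C, GrB_BOOL, 4, 4) ;

    GrB_Matrix_setElement_INT8 (A, 1, 0, 0) ;
    GrB_Matrix_setElement_INT8 (B, 2, 0, 0) ;

    /* cij = (aij < bij), computed only where both A and B have entries */
    GrB_Matrix_eWiseMult_BinaryOp (C, NULL, NULL, GrB_LT_INT8, A, B, NULL) ;

    GrB_Matrix_free (&A) ;
    GrB_Matrix_free (&B) ;
    GrB_Matrix_free (&C) ;
    GrB_finalize ( ) ;
    return (GrB_SUCCESS) ;
}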
section.c
#include <omp.h>
#include <stdio.h>
#define N 1000

int main ()
{
    int i, id;
    float a[N], b[N], c[N];

    /* Some initializations */
    for (i=0; i < N; i++)
        a[i] = b[i] = i * 1.0;

    #pragma omp parallel shared(a,b,c) private(i,id)
    {
        #pragma omp sections nowait
        {
            #pragma omp section
            {
                id = omp_get_thread_num();
                printf("working in Thread %d\n", id);
                for (i=0; i < N/2; i++) {
                    printf("%x:", id);
                    c[i] = a[i] + b[i];
                }
            }

            #pragma omp section
            {
                id = omp_get_thread_num();
                printf("working in Thread %d\n", id);
                for (i=N/2; i < N; i++) {
                    printf("%x:", id);
                    c[i] = a[i] + b[i];
                }
            }
        } /* end of sections */
    } /* end of parallel section */
}
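The example pairs a `parallel` region with an inner `sections` construct; when nothing else happens inside the region, the two directives can be merged. The fragment below is a sketch of the equivalent combined form, reusing a, b, c, i, id, and N from the program above (same semantics under any OpenMP-compliant compiler).

/* Sketch: combined parallel sections construct replacing the region above.
 * Each section is still run by one thread; no nowait clause is used because
 * the combined region already ends with the single join at its close. */
#pragma omp parallel sections shared(a,b,c) private(i,id)
{
    #pragma omp section
    {
        id = omp_get_thread_num();
        printf("working in Thread %d\n", id);
        for (i = 0; i < N/2; i++)
            c[i] = a[i] + b[i];
    }
    #pragma omp section
    {
        id = omp_get_thread_num();
        printf("working in Thread %d\n", id);
        for (i = N/2; i < N; i++)
            c[i] = a[i] + b[i];
    }
}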
mxEvaluateSourceTopography1d.c
#include "../../@SWEAbstract1d/private/mxSWE1d.h" #include "mex.h" #define NRHS 4 #define NLHS 1 #define NVAR 2 void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { /* check input & output */ if (nrhs != NRHS) { mexPrintf("Matlab:%s:InvalidNumberInput,\n", __FILE__); mexPrintf("%d inputs required.\n", NRHS); } if (nlhs != NLHS) { mexPrintf("Matlab:%s:InvalidNumberOutput,\n", __FILE__); mexPrintf("%d inputs required.\n", NLHS); } double gra = mxGetScalar(prhs[0]); signed char *regType = (signed char *)mxGetData(prhs[1]); PhysField1d fphys = convertMexToPhysField(prhs[2]); // double *fphys = mxGetPr(prhs[2]); double *zgrad = mxGetPr(prhs[3]); // const mwSize *dims = mxGetDimensions(prhs[2]); const size_t Np = fphys.Np; const size_t K = fphys.K; const mwSize dimLen[3] = {Np, K, NVAR}; plhs[0] = mxCreateNumericArray(3, dimLen, mxDOUBLE_CLASS, mxREAL); PhysField1d source = convertMexToPhysField(plhs[0]); double *bx = zgrad; // double *sourceH = mxGetPr(plhs[0]); // double *sourceQx = sourceH + K * Np; #ifdef _OPENMP #pragma omp parallel for num_threads(DG_THREADS) #endif for (int k = 0; k < K; k++) { NdgRegionType type = (NdgRegionType)regType[k]; if (type == NdgRegionDry) continue; for (int n = 0; n < Np; n++) { int sk = k * Np + n; source.hu[sk] = -gra * (fphys.h[sk] + fphys.z[sk]) * bx[sk]; } } return; }
t_cholmod_subtree.c
/* ========================================================================== */ /* === GPU/t_cholmod_subtree ================================================ */ /* ========================================================================== */ /* ----------------------------------------------------------------------------- * CHOLMOD/GPU Module. Copyright (C) 2005-2012, Timothy A. Davis * The CHOLMOD/GPU Module is licensed under Version 2.0 of the GNU * General Public License. See gpl.txt for a text of the license. * CHOLMOD is also available under other licenses; contact authors for details. * http://www.suitesparse.com * -------------------------------------------------------------------------- */ /* * File: * t_cholmod_subtree * * Description: * Contains functions for GPU subtrees * algorithm. * */ #ifdef SUITESPARSE_CUDA /* include */ #include <string.h> #include "cholmod_template.h" #include "batchKernels.h" #include <cusolverDn.h> #include <time.h> /* * Function: * gpu_init * * Description: * Performs required initialization for GPU computing. * Returns 0 if there is an error, disabling GPU computing (useGPU = 0) * */ int TEMPLATE2 (CHOLMOD (gpu_init)) ( cholmod_common *Common, cholmod_factor *L, cholmod_sparse *A, cholmod_global_pointers *gb_p, cholmod_gpu_pointers *gpu_p, int gpuid ) { /* local variables */ int idx, cache_idx; Int i, LsDim, ApDim, AiDim, AxDim; Int *h_Ls, *h_Ap, *h_Ai, *Ls, *Ap, *Ai; double *h_Ax, *Ax; int numThreads; cudaError_t cudaErr; cublasStatus_t cublasError; cusolverStatus_t cusolverErr; numThreads = Common->ompNumThreads; { #ifdef USE_NVTX nvtxRangeId_t range1 = nvtxRangeStartA("gpu_init"); #endif /* set gpu memory pointers */ gpu_p->gpuPtr[gpuid] = Common->dev_mempool[gpuid]; /* type (double *) */ /* pointers to supernode matrices */ gpu_p->d_ptrSuper[gpuid] = gpu_p->gpuPtr[gpuid]; gpu_p->gpuPtr[gpuid] += 3*gb_p->ptrSuperSize; /* pointers to descendant matrices */ gpu_p->d_ptrDesc[gpuid] = gpu_p->gpuPtr[gpuid]; gpu_p->gpuPtr[gpuid] += 6*gb_p->ptrDescSize; /* type (double) */ for (idx = 0; idx < IBUFF_LOOPSIZE; idx++) { gpu_p->d_LxFactorized[gpuid][idx] = gpu_p->gpuPtr[gpuid]; gpu_p->gpuPtr[gpuid] += gb_p->LxSizeFactorized; } /* Lx */ gpu_p->d_Lx[gpuid] = gpu_p->gpuPtr[gpuid]; gpu_p->gpuPtr[gpuid] += gb_p->LxSize; /* C */ gpu_p->d_C[gpuid] = gpu_p->gpuPtr[gpuid]; gpu_p->gpuPtr[gpuid] += gb_p->CSize; /* Ax */ gpu_p->d_Ax[gpuid] = gpu_p->gpuPtr[gpuid]; gpu_p->gpuPtr[gpuid] += gb_p->AxSize; /* type (Int) */ for (idx = 0; idx < IBUFF_LOOPSIZE; idx++) { for (cache_idx = 0; cache_idx < MAP_CACHESIZE; cache_idx++) { gpu_p->d_MapFactorized[gpuid][idx][cache_idx] = gpu_p->gpuPtr[gpuid]; gpu_p->gpuPtr[gpuid] += gb_p->MapSizeFactorized; } } /* Ap */ gpu_p->d_Ap[gpuid] = gpu_p->gpuPtr[gpuid]; gpu_p->gpuPtr[gpuid] += gb_p->ApSize; /* Ai */ gpu_p->d_Ai[gpuid] = gpu_p->gpuPtr[gpuid]; gpu_p->gpuPtr[gpuid] += gb_p->AiSize; /* Ls */ gpu_p->d_Ls[gpuid] = gpu_p->gpuPtr[gpuid]; gpu_p->gpuPtr[gpuid] += gb_p->LsSize; /* Map */ gpu_p->d_Map[gpuid] = gpu_p->gpuPtr[gpuid]; gpu_p->gpuPtr[gpuid] += gb_p->MapSize; /* type (int) */ /* dimensions of supernode matrices */ gpu_p->d_dimSuper[gpuid] = gpu_p->gpuPtr[gpuid]; gpu_p->gpuPtr[gpuid] += 13*gb_p->dimSuperSize; /* dimensions of descendant matrices */ gpu_p->d_dimDesc[gpuid] = gpu_p->gpuPtr[gpuid]; gpu_p->gpuPtr[gpuid] += 14*gb_p->dimDescSize; /* info */ gpu_p->d_info[gpuid] = gpu_p->gpuPtr[gpuid]; gpu_p->gpuPtr[gpuid] += sizeof(int); /* devSync memory for custom cusolverDnDpotrf */ gpu_p->d_devSync[gpuid] = gpu_p->gpuPtr[gpuid]; 
gpu_p->gpuPtr[gpuid] += 2*sizeof(int)*gb_p->maxbatch; /* cuSolver Cholesky initialization (get workspace size) */ cusolverErr = cusolverDnDpotrf_bufferSize(Common->cusolverHandle[gpuid], CUBLAS_FILL_MODE_LOWER, gb_p->maxnscol, gpu_p->d_C[gpuid], gb_p->maxnsrow, &gb_p->work_size); if (cusolverErr != CUSOLVER_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU cusolverDnDpotrf_bufferSize failure"); } /* Copy arrays to device from pinned memory */ Ls = L->s; Ap = A->p; Ai = A->i; Ax = A->x; LsDim = gb_p->LsSize / sizeof(Int); ApDim = gb_p->ApSize / sizeof(Int); AiDim = gb_p->AiSize / sizeof(Int); AxDim = gb_p->AxSize / sizeof(double); /* Set pinned memory pointers */ gpu_p->hostPtr[gpuid] = Common->host_pinned_mempool[gpuid]; /* Ax */ h_Ax = gpu_p->hostPtr[gpuid]; gpu_p->hostPtr[gpuid] += gb_p->AxSize; /* Ls */ h_Ls = gpu_p->hostPtr[gpuid]; gpu_p->hostPtr[gpuid] += gb_p->LsSize; /* Ap */ h_Ap = gpu_p->hostPtr[gpuid]; gpu_p->hostPtr[gpuid] += gb_p->ApSize; /* Ai */ h_Ai = gpu_p->hostPtr[gpuid]; gpu_p->hostPtr[gpuid] += gb_p->AiSize; cudaStream_t copystream, memsetstream; cudaErr = cudaStreamCreate (&copystream); CHOLMOD_HANDLE_CUDA_ERROR(cudaErr,"cudaStreamCreate(copystream)"); cudaErr = cudaStreamCreate (&memsetstream); CHOLMOD_HANDLE_CUDA_ERROR(cudaErr,"cudaStreamCreate(memsetstream)"); /* Copy arrays to pinned memory from regular memory */ #pragma omp parallel for num_threads(numThreads) private(i) for(i = 0; i < AxDim; i++) { Ax[i] = Ax[i]; } #pragma omp parallel for num_threads(numThreads) private(i) for(i = 0; i < AxDim; i++) { h_Ax[i] = h_Ax[i]; } #pragma omp parallel for num_threads(numThreads) private(i) for(i = 0; i < AxDim; i++) { h_Ax[i] = Ax[i]; } cudaErr = cudaMemcpyAsync ( gpu_p->d_Ax[gpuid], h_Ax, gb_p->AxSize, cudaMemcpyHostToDevice, copystream); CHOLMOD_HANDLE_CUDA_ERROR(cudaErr,"cudaMemcpy(d_Ax)"); #pragma omp parallel for num_threads(numThreads) private(i) for(i = 0; i < LsDim; i++){ h_Ls[i] = Ls[i]; } cudaErr = cudaMemcpyAsync ( gpu_p->d_Ls[gpuid], h_Ls, gb_p->LsSize, cudaMemcpyHostToDevice, copystream); CHOLMOD_HANDLE_CUDA_ERROR(cudaErr,"cudaMemcpy(d_Ls)"); #pragma omp parallel for num_threads(numThreads) private(i) for(i = 0; i < ApDim; i++){ h_Ap[i] = Ap[i]; } cudaErr = cudaMemcpyAsync ( gpu_p->d_Ap[gpuid], h_Ap, gb_p->ApSize, cudaMemcpyHostToDevice, copystream); CHOLMOD_HANDLE_CUDA_ERROR(cudaErr,"cudaMemcpy(d_Ap)"); #pragma omp parallel for num_threads(numThreads) private(i) for(i = 0; i < AiDim; i++){ h_Ai[i] = Ai[i]; } cudaErr = cudaMemcpyAsync ( gpu_p->d_Ai[gpuid], h_Ai, gb_p->AiSize, cudaMemcpyHostToDevice, copystream); CHOLMOD_HANDLE_CUDA_ERROR(cudaErr,"cudaMemcpy(d_Ai)"); /* Copy arrays to device from pinned memory */ cudaErr = cudaMemsetAsync ( gpu_p->d_Lx[gpuid], 0, gb_p->LxSize, memsetstream); CHOLMOD_HANDLE_CUDA_ERROR(cudaErr,"cudaMemset(d_Lx)"); /* set pinned memory pointers */ gpu_p->hostPtr[gpuid] = Common->host_pinned_mempool[gpuid]; /* type (double *) */ for (idx = 0; idx < IBUFF_LOOPSIZE; idx++) { gpu_p->h_LxFactorized[gpuid][idx] = gpu_p->hostPtr[gpuid]; gpu_p->hostPtr[gpuid] += gb_p->LxSizeFactorized; } gpu_p->h_ptrSuper[gpuid] = gpu_p->hostPtr[gpuid]; gpu_p->hostPtr[gpuid] += 3*gb_p->ptrSuperSize; gpu_p->h_ptrDesc[gpuid] = gpu_p->hostPtr[gpuid]; gpu_p->hostPtr[gpuid] += 6*gb_p->ptrDescSize; /* type (double) */ gpu_p->h_Lx[gpuid] = gpu_p->hostPtr[gpuid]; gpu_p->hostPtr[gpuid] += gb_p->LxSize; /* type (Int) */ gpu_p->h_darray[gpuid] = gpu_p->hostPtr[gpuid]; gpu_p->hostPtr[gpuid] += gb_p->ScoreSizeFactorized; /* type (int) */ gpu_p->h_dimSuper[gpuid] = 
gpu_p->hostPtr[gpuid]; gpu_p->hostPtr[gpuid] += 13*gb_p->dimSuperSize; gpu_p->h_dimDesc[gpuid] = gpu_p->hostPtr[gpuid]; gpu_p->hostPtr[gpuid] += 14*gb_p->dimDescSize; /* get device pointer to pinted buffer */ cudaErr = cudaHostGetDevicePointer ( (void**)&gpu_p->h_pLx[gpuid], gpu_p->h_Lx[gpuid], 0); if (cudaErr) { printf("HostGetDevicePointer - cudaError:%s\n",cudaGetErrorString(cudaErr)); ERROR ( CHOLMOD_GPU_PROBLEM, "GPU cudaHostGetDevicePointer"); } #ifdef USE_NVTX nvtxRangeEnd(range1); #endif } return (1); /* initialization successfull, useGPU = 1 */ } /* * Function: * gpu_initialize_supernode_batch * * Description: * Initializes a batch of supernodes. * Performs three tasks: * 1. sets device pointers * 2. creates map on device * 3. initializes Lx (factor) on device * */ void TEMPLATE2 (CHOLMOD (gpu_initialize_supernode_batch)) ( cholmod_common *Common, cholmod_global_pointers *gb_p, cholmod_gpu_pointers *gpu_p, /* device pointers */ Int n, Int maxsnsrow, /* max nsrow for batch of supernodes */ Int maxkdif, /* max kdif for batch of supernodes */ Int nzmax, /* max nzmax for batch of supernodes */ Int strideSize, /* stride for descendant lists */ int nbatch, /* size of batch */ int gpuid /* gpu id */ ) { /* local variables */ int stream, maxbatch; struct cholmod_super_ptrs_t *h_super, *d_super; cudaError_t cudaStat; maxbatch = gb_p->maxbatch; /* * Set pointers for batching arrays: * sets pointers for storing dimensions and matrix addresses of * supernodes and descendants, in the order they are batched. */ /* set pointers for storing dimensions of batched supernodes */ gpu_p->d_potrf[gpuid].n = gpu_p->d_dimSuper[gpuid] + 0*maxbatch; gpu_p->d_potrf[gpuid].lda = gpu_p->d_dimSuper[gpuid] + 1*maxbatch; gpu_p->d_trsm[gpuid].m = gpu_p->d_dimSuper[gpuid] + 2*maxbatch; gpu_p->d_trsm[gpuid].n = gpu_p->d_dimSuper[gpuid] + 3*maxbatch; gpu_p->d_trsm[gpuid].lda = gpu_p->d_dimSuper[gpuid] + 4*maxbatch; gpu_p->d_trsm[gpuid].ldb = gpu_p->d_dimSuper[gpuid] + 5*maxbatch; gpu_p->d_super[gpuid].s = gpu_p->d_dimSuper[gpuid] + 6*maxbatch; gpu_p->d_super[gpuid].k1 = gpu_p->d_dimSuper[gpuid] + 7*maxbatch; gpu_p->d_super[gpuid].k2 = gpu_p->d_dimSuper[gpuid] + 8*maxbatch; gpu_p->d_super[gpuid].psi = gpu_p->d_dimSuper[gpuid] + 9*maxbatch; gpu_p->d_super[gpuid].psx = gpu_p->d_dimSuper[gpuid] + 10*maxbatch; gpu_p->d_super[gpuid].nscol = gpu_p->d_dimSuper[gpuid] + 11*maxbatch; gpu_p->d_super[gpuid].nsrow = gpu_p->d_dimSuper[gpuid] + 12*maxbatch; /* set pointers for storing pointers of matrices of batched supernodes */ gpu_p->d_potrf[gpuid].A = gpu_p->d_ptrSuper[gpuid] + 0*maxbatch; gpu_p->d_trsm[gpuid].A = gpu_p->d_ptrSuper[gpuid] + 1*maxbatch; gpu_p->d_trsm[gpuid].B = gpu_p->d_ptrSuper[gpuid] + 2*maxbatch; /* set pointers for storing dimensions of batched descendants */ gpu_p->d_syrk[gpuid].n = gpu_p->d_dimDesc[gpuid] + 0*strideSize; gpu_p->d_syrk[gpuid].k = gpu_p->d_dimDesc[gpuid] + 1*strideSize; gpu_p->d_syrk[gpuid].lda = gpu_p->d_dimDesc[gpuid] + 2*strideSize; gpu_p->d_syrk[gpuid].ldc = gpu_p->d_dimDesc[gpuid] + 3*strideSize; gpu_p->d_gemm[gpuid].m = gpu_p->d_dimDesc[gpuid] + 4*strideSize; gpu_p->d_gemm[gpuid].n = gpu_p->d_dimDesc[gpuid] + 5*strideSize; gpu_p->d_gemm[gpuid].k = gpu_p->d_dimDesc[gpuid] + 6*strideSize; gpu_p->d_gemm[gpuid].lda = gpu_p->d_dimDesc[gpuid] + 7*strideSize; gpu_p->d_gemm[gpuid].ldb = gpu_p->d_dimDesc[gpuid] + 8*strideSize; gpu_p->d_gemm[gpuid].ldc = gpu_p->d_dimDesc[gpuid] + 9*strideSize; gpu_p->d_desc[gpuid].ndrow1 = gpu_p->d_dimDesc[gpuid] + 10*strideSize; gpu_p->d_desc[gpuid].ndrow2 = 
gpu_p->d_dimDesc[gpuid] + 11*strideSize; gpu_p->d_desc[gpuid].pdi1 = gpu_p->d_dimDesc[gpuid] + 12*strideSize; gpu_p->d_desc[gpuid].s = gpu_p->d_dimDesc[gpuid] + 13*strideSize; /* set pointers for storing pointers of matrices of batched descendants */ gpu_p->d_syrk[gpuid].A = gpu_p->d_ptrDesc[gpuid] + 0*strideSize; gpu_p->d_syrk[gpuid].C = gpu_p->d_ptrDesc[gpuid] + 1*strideSize; gpu_p->d_gemm[gpuid].A = gpu_p->d_ptrDesc[gpuid] + 2*strideSize; gpu_p->d_gemm[gpuid].B = gpu_p->d_ptrDesc[gpuid] + 3*strideSize; gpu_p->d_gemm[gpuid].C = gpu_p->d_ptrDesc[gpuid] + 4*strideSize; gpu_p->d_desc[gpuid].C = gpu_p->d_ptrDesc[gpuid] + 5*strideSize; /* set dimensions*/ h_super = &gpu_p->h_super[gpuid]; d_super = &gpu_p->d_super[gpuid]; /* create map for batch of supernodes */ createMapOnDevice_batch ( gpu_p->d_Map[gpuid], gpu_p->d_Ls[gpuid], d_super->psi, d_super->nsrow, maxsnsrow, n, nbatch, &(Common->gpuStream[gpuid * Common->numGPU_parallel][CHOLMOD_DEVICE_STREAMS])); cudaStat = cudaGetLastError(); if (cudaStat) { printf("error: %s\n",cudaGetErrorString(cudaStat)); ERROR (CHOLMOD_GPU_PROBLEM, "GPU createMapOnDevice_batch\n") ; } /* initialize Lx (factor) for batch of supernoeds */ initLxOnDevice_batch( gpu_p->d_Lx[gpuid], gpu_p->d_Ax[gpuid], gpu_p->d_Ap[gpuid], gpu_p->d_Ai[gpuid], gpu_p->d_Map[gpuid], d_super->nsrow, d_super->psx, d_super->k1, d_super->k2, nzmax, maxkdif, n, nbatch, &(Common->gpuStream[gpuid * Common->numGPU_parallel][CHOLMOD_DEVICE_STREAMS])); cudaStat = cudaGetLastError(); if (cudaStat!=cudaSuccess) { printf("error: %s\n",cudaGetErrorString(cudaStat)); ERROR (CHOLMOD_GPU_PROBLEM, "GPU initLxonDevice_batch\n") ; } } /* * Function: * gpu_updateC_batch * * Description: * Computes the schur compliment (DSYRK & DGEMM) of a batch of supernodes and maps * it back (addUpdate). * Performs three tasks: * 1. DSYRK * a. cuBlas calls (n > D_N || k > D_K) * b. batched call (otherwise) * * 2. DGEMM * a. cuBlas calls (m > D_M || n > D_N || k > D_K) * b. batched call (otherwise) * * 3. addUpdate (map schur comp. to supernode) * a. update one supernode (ndrow1 > D_ROW1 || ndrow2 > D_ROW2) * b. batched call (otherwise) * */ void TEMPLATE2 (CHOLMOD (gpu_updateC_batch)) ( cholmod_common *Common, cholmod_global_pointers *gb_p, cholmod_gpu_pointers *gpu_p, cholmod_cpu_pointers *cpu_p, cholmod_tree_pointers *tree_p, cholmod_profile_pointers *prof_p, int n, int subtree, int level, Int node, int nbatch, /* size of batch (# descendants) */ Int syrk_count, /* # syrk calls sent to cuBlas */ Int gemm_count, /* # gemm calls sent to cuBlas */ Int update_count, /* # addUpdate calls to stream seperately */ int gpuid, int flag, Int start, Int end, Int *max_dim, /* max m,n dimensions for batch */ Int *LpxSub, Int *Lpx ) { /* local variables */ Int i, j, k; Int *ndrow1, *ndrow2, *ndrow3, *ndrow, *ndcol, *pdi1, *list, *nsrow; double alpha, beta, tstart1; double **Aptr, **Bptr, **Cptr, **C2ptr, *tend, *gemm_time, *syrk_time; struct cholmod_syrk_ptrs_t *h_syrk, *d_syrk; struct cholmod_gemm_ptrs_t *h_gemm, *d_gemm; struct cholmod_desc_ptrs_t *h_desc, *d_desc; struct cholmod_super_ptrs_t *h_super, *d_super; cublasStatus_t cublasStatus ; cudaError_t cudaStat; int vgpuid; alpha = -1.0 ; beta = 0.0 ; /* set profile pointers */ tend = prof_p->f_end[gpuid]; syrk_time = prof_p->syrk_time[gpuid]; gemm_time = prof_p->gemm_time[gpuid]; /* * Perform DSYRK * Compute dsyrk for all descendants * of a batch of supernodes. 
*/ TIMER_START1(tstart1); /* set dimensions */ h_syrk = &gpu_p->h_syrk[gpuid]; d_syrk = &gpu_p->d_syrk[gpuid]; /* loop over 'large' syrk's */ for(i = 0; i < syrk_count; i++) { j = i % CHOLMOD_DEVICE_STREAMS; k = i / CHOLMOD_DEVICE_STREAMS / Common->numGPU % Common->numGPU_subtree_parallel; //vgpuid = gpuid * Common->numGPU_parallel + i / CHOLMOD_DEVICE_STREAMS % Common->numGPU_parallel; vgpuid = k * Common->numGPU + gpuid * Common->numGPU_parallel + i / CHOLMOD_DEVICE_STREAMS % Common->numGPU_parallel; /* set cublas stream */ cublasStatus = cublasSetStream (Common->cublasHandle[gpuid], Common->gpuStream[vgpuid][j]) ; if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU cublasSetStream failure"); } /* dsyrk on cuBlas */ cublasDsyrk ( Common->cublasHandle[gpuid], CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, h_syrk->n[i], h_syrk->k[i], &alpha, h_syrk->A[i], h_syrk->lda[i], &beta, h_syrk->C[i], h_syrk->ldc[i]); //cudaEventRecord(Common->updateCKernelsComplete[gpuid * Common->numGPU_parallel], Common->gpuStream[vgpuid][j]); } /* check if any syrk's left for batching */ if( (nbatch - syrk_count) > 0 ) { vgpuid = gpuid * Common->numGPU_parallel; /* dsyrk on batched kernels */ dsyrk_custom_simple_1block_batch( Common->gpuStream[vgpuid][CHOLMOD_DEVICE_STREAMS], CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, &d_syrk->n[syrk_count], &d_syrk->k[syrk_count], &alpha, &d_syrk->A[syrk_count], &d_syrk->lda[syrk_count], &beta, &d_syrk->C[syrk_count], &d_syrk->ldc[syrk_count], nbatch-syrk_count); //cudaEventRecord(Common->updateCKernelsComplete[gpuid * Common->numGPU_parallel], Common->gpuStream[vgpuid][CHOLMOD_DEVICE_STREAMS]); } TIMER_END1(tstart1,syrk_time,0); TIMER_END1(tstart1,syrk_time,1); /* * Perform DGEMM * Compute dgemm for all descendants * of a batch of supernodes. 
*/ TIMER_START1(tstart1); /* set dimensions */ h_gemm = &gpu_p->h_gemm[gpuid]; d_gemm = &gpu_p->d_gemm[gpuid]; /* loop over 'large' gemm's */ for(i = 0; i < gemm_count; i++) { j = i % CHOLMOD_DEVICE_STREAMS; k = i / CHOLMOD_DEVICE_STREAMS / Common->numGPU % Common->numGPU_subtree_parallel; //vgpuid = gpuid * Common->numGPU_parallel + i / CHOLMOD_DEVICE_STREAMS % Common->numGPU_parallel; vgpuid = k * Common->numGPU + gpuid * Common->numGPU_parallel + i / CHOLMOD_DEVICE_STREAMS % Common->numGPU_parallel; /* set cublas stream */ cublasStatus = cublasSetStream (Common->cublasHandle[gpuid], Common->gpuStream[vgpuid][j]) ; if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU cublasSetStream failure"); } if (h_gemm->m[i] > 0) /* dgemm on cuBlas */ cublasDgemm ( Common->cublasHandle[gpuid], CUBLAS_OP_N, CUBLAS_OP_T, h_gemm->m[i], h_gemm->n[i], h_gemm->k[i], &alpha, h_gemm->A[i], h_gemm->lda[i], h_gemm->B[i], h_gemm->ldb[i], &beta, h_gemm->C[i], h_gemm->ldc[i]); //cudaEventRecord(Common->updateCKernelsComplete[gpuid * Common->numGPU_parallel], Common->gpuStream[vgpuid][j]); } /* check if any gemm's left for batching */ if( (nbatch - gemm_count) > 0 ) { vgpuid = gpuid * Common->numGPU_parallel; /* dgemm on batched kernels */ dgemm_custom_simple_1block_batch( Common->gpuStream[vgpuid][CHOLMOD_DEVICE_STREAMS], CUBLAS_OP_N, CUBLAS_OP_T, &d_gemm->m[gemm_count], &d_gemm->n[gemm_count], &d_gemm->k[gemm_count], &alpha, &d_gemm->A[gemm_count], &d_gemm->lda[gemm_count], &d_gemm->B[gemm_count], &d_gemm->ldb[gemm_count], &beta, &d_gemm->C[gemm_count], &d_gemm->ldc[gemm_count], nbatch-gemm_count); //cudaEventRecord(Common->updateCKernelsComplete[gpuid * Common->numGPU_parallel], Common->gpuStream[vgpuid][CHOLMOD_DEVICE_STREAMS]); } TIMER_END1(tstart1,gemm_time,0); TIMER_END1(tstart1,gemm_time,1); /* copy supernode from pinned to regular memory - only at last level */ //if ( level == tree_p->supernode_num_levels[subtree]-1 ) if ( level <= tree_p->supernode_num_levels[subtree]-1 && level > 0 && node == 0 ) { const Int start = tree_p->supernode_levels_ptrs[tree_p->supernode_levels_subtree_ptrs[subtree]+level-1]; const Int end = tree_p->supernode_levels_ptrs[tree_p->supernode_levels_subtree_ptrs[subtree]+level]; TEMPLATE2 (CHOLMOD(gpu_copy_supernode))( Common, gpu_p, cpu_p, tree_p, subtree, level-1, gpuid, 2, start, end, LpxSub, Lpx); } /* synchronize streams */ for(i = 0; i < CHOLMOD_DEVICE_STREAMS; i++){ for (k = 0; k < Common->numGPU_subtree_parallel; k++) { for (vgpuid = k * Common->numGPU + gpuid * Common->numGPU_parallel; vgpuid < k * Common->numGPU + (gpuid+1) * Common->numGPU_parallel; vgpuid++) { //cudaStat = cudaStreamWaitEvent (Common->gpuStream[vgpuid][i], Common->updateCKernelsComplete[gpuid * Common->numGPU_parallel], 0); cudaStat = cudaStreamSynchronize (Common->gpuStream[vgpuid][i]) ; if (cudaStat) { printf("error: %s\n",cudaGetErrorString(cudaStat)); ERROR (CHOLMOD_GPU_PROBLEM, "GPU dgemm_custom_simple_1block_batch") ; } } } } //cudaStat = cudaStreamWaitEvent (Common->gpuStream[gpuid * Common->numGPU_parallel][CHOLMOD_DEVICE_STREAMS], Common->updateCKernelsComplete[gpuid * Common->numGPU_parallel], 0); cudaStat = cudaStreamSynchronize (Common->gpuStream[gpuid * Common->numGPU_parallel][CHOLMOD_DEVICE_STREAMS]); /* * Perform addUpdate * Add schur compliment for all descendants * of a batch of supernodes. 
*/ TIMER_START1(tstart1); /* set dimensions */ h_desc = &gpu_p->h_desc[gpuid]; d_desc = &gpu_p->d_desc[gpuid]; h_super = &gpu_p->h_super[gpuid]; d_super = &gpu_p->d_super[gpuid]; /* check if any addUpdate's left for batching */ if( (nbatch - update_count) > 0 ) { vgpuid = gpuid * Common->numGPU_parallel; /* mapping (to factor Lx) for descendants in a batch of supernodes */ addUpdateOnDevice_batch( gpu_p->d_Lx[gpuid], &d_desc->C[update_count], gpu_p->d_Map[gpuid], gpu_p->d_Ls[gpuid], n, &d_desc->pdi1[update_count], &d_desc->ndrow1[update_count], &d_desc->ndrow2[update_count], d_super->psx, d_super->nsrow, &d_desc->s[update_count], max_dim, nbatch-update_count, &(Common->gpuStream[vgpuid][CHOLMOD_DEVICE_STREAMS])); } /* loop over 'large' addUpdate's */ for(i = 0; i < update_count; i++) { j = i % CHOLMOD_DEVICE_STREAMS; k = i / CHOLMOD_DEVICE_STREAMS / Common->numGPU % Common->numGPU_subtree_parallel; //vgpuid = gpuid * Common->numGPU_parallel + i / CHOLMOD_DEVICE_STREAMS % Common->numGPU_parallel; vgpuid = k * Common->numGPU + gpuid * Common->numGPU_parallel + i / CHOLMOD_DEVICE_STREAMS % Common->numGPU_parallel; /* mapping (to factor Lx) for each descendants */ addUpdateOnDevice_large( gpu_p->d_Lx[gpuid], h_desc->C[i], gpu_p->d_Map[gpuid], gpu_p->d_Ls[gpuid], h_desc->pdi1[i], h_desc->ndrow1[i], h_desc->ndrow2[i], h_super->psx[h_desc->s[i]], h_super->nsrow[h_desc->s[i]], (Int)((n+1)*h_desc->s[i]), &(Common->gpuStream[vgpuid][j])); } /* synchronize streams */ for(i = 0; i < CHOLMOD_DEVICE_STREAMS; i++){ for (k = 0; k < Common->numGPU_subtree_parallel; k++) { for (vgpuid = k * Common->numGPU + gpuid * Common->numGPU_parallel; vgpuid < k * Common->numGPU + (gpuid+1) * Common->numGPU_parallel; vgpuid++) { cudaStat = cudaStreamSynchronize (Common->gpuStream[vgpuid][i]) ; if (cudaStat) { printf("error: %s\n",cudaGetErrorString(cudaStat)); ERROR (CHOLMOD_GPU_PROBLEM, "GPU addUpdateOnDevice_batch") ; } } } } cudaStat = cudaStreamSynchronize (Common->gpuStream[gpuid * Common->numGPU_parallel][CHOLMOD_DEVICE_STREAMS]) ; TIMER_END1(tstart1,tend,4); } /* * Function: * gpu_lower_potrf_batch * * Description: * computes cholesky factoriation for a batch of supernodes. * Performs one task: * 1. DPOTRF * a. cuSolver calls (n < S_N) * b. 
batched call * */ void TEMPLATE2 (CHOLMOD (gpu_lower_potrf_batch)) ( cholmod_common *Common, cholmod_global_pointers *gb_p, cholmod_gpu_pointers *gpu_p, /* device pointers */ cholmod_profile_pointers *prof_p, int nbatch, /* batch size (# supernodes) */ Int potrf_count, /* # potrf calls sent to cuSolver */ int gpuid /* gpu id */ ) { /* local variables */ Int i, j, k, info; Int *nscol2, *nsrow; double tstart1; double **Aptr, *tend, *potrf_time; struct cholmod_potrf_ptrs_t *h_potrf, *d_potrf; cudaError_t cudaStat; cusolverStatus_t cusolverErr; int vgpuid; /* set profile pointers */ tend = prof_p->f_end[gpuid]; potrf_time = prof_p->potrf_time[gpuid]; /* * Perform batched DPOTRF * Compute potrf for all supernodes * in a batch */ TIMER_START1(tstart1); /* set dimensions */ h_potrf = &gpu_p->h_potrf[gpuid]; d_potrf = &gpu_p->d_potrf[gpuid]; /* loop over 'large' potrf's */ for(i=0; i<potrf_count; i++) { j = i % CHOLMOD_DEVICE_STREAMS; k = i / CHOLMOD_DEVICE_STREAMS / Common->numGPU % Common->numGPU_subtree_parallel; //vgpuid = gpuid * Common->numGPU_parallel + i / CHOLMOD_DEVICE_STREAMS % Common->numGPU_parallel; vgpuid = k * Common->numGPU + gpuid * Common->numGPU_parallel + i / CHOLMOD_DEVICE_STREAMS % Common->numGPU_parallel; /* set cuSolver stream */ cusolverErr = cusolverDnSetStream (Common->cusolverHandle[gpuid], Common->gpuStream[vgpuid][j]) ; if (cusolverErr != CUSOLVER_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU cusolverDnSetStream failure"); } /* potrf on cuSolver */ cusolverDnDpotrf( Common->cusolverHandle[gpuid], CUBLAS_FILL_MODE_LOWER, h_potrf->n[i], h_potrf->A[i], h_potrf->lda[i], &gpu_p->d_devSync[gpuid][2*i], gb_p->work_size, gpu_p->d_info[gpuid]); } /* check if any potrf's left for batching */ if( (nbatch - potrf_count) > 0 ) { vgpuid = gpuid * Common->numGPU_parallel; /* potrf on custom batched kernels */ dpotrf_custom_simple_1block_batch ( Common->gpuStream[vgpuid][CHOLMOD_DEVICE_STREAMS], CUBLAS_FILL_MODE_LOWER, &d_potrf->n[potrf_count], &d_potrf->A[potrf_count], &d_potrf->lda[potrf_count], &info, nbatch-potrf_count) ; } /* synchronize streams */ for(i = 0; i < CHOLMOD_DEVICE_STREAMS; i++){ for (k = 0; k < Common->numGPU_subtree_parallel; k++) { for (vgpuid = k * Common->numGPU + gpuid * Common->numGPU_parallel; vgpuid < k * Common->numGPU + (gpuid+1) * Common->numGPU_parallel; vgpuid++) { cudaStat = cudaStreamSynchronize (Common->gpuStream[vgpuid][i]) ; if (cudaStat) { printf("error: %s\n",cudaGetErrorString(cudaStat)); ERROR (CHOLMOD_GPU_PROBLEM, "GPU dpotrf_custom_simple_1block_batch") ; } } } } cudaStat = cudaStreamSynchronize (Common->gpuStream[gpuid * Common->numGPU_parallel][CHOLMOD_DEVICE_STREAMS]) ; TIMER_END1(tstart1,potrf_time,0); TIMER_END1(tstart1,potrf_time,1); } /* * Function: * gpu_triangular_solve_batch * * Description: * Computes triangular solve for a batch of supernodes. * Performs one task: * 1. DTRSM * a. cuBlas calls (m < S_M || n < S_N) * b. 
batched call (otherwise) * */ void TEMPLATE2 (CHOLMOD (gpu_triangular_solve_batch)) ( cholmod_common *Common, cholmod_global_pointers *gb_p, cholmod_gpu_pointers *gpu_p, /* device pointers */ cholmod_profile_pointers *prof_p, int nbatch, /* batch size (# supernodes) */ Int trsm_count, /* # trsm calls sent to cuBlas */ int gpuid /* gpu id */ ) { /* local variables */ Int i, j, k; Int *nsrow2, *nscol2, *nsrow; double alpha, tstart1; double **Aptr, **Bptr, *tend, *trsm_time; struct cholmod_trsm_ptrs_t *h_trsm, *d_trsm; cublasStatus_t cublasStatus ; cudaError_t cudaStat ; alpha = 1.0 ; int vgpuid; /* set profile pointers */ tend = prof_p->f_end[gpuid]; trsm_time = prof_p->trsm_time[gpuid]; /* * Perform batched DTRSM * Compute trsm for all supernodes * in a batch */ TIMER_START1(tstart1); /* set dimensions */ h_trsm = &gpu_p->h_trsm[gpuid]; d_trsm = &gpu_p->d_trsm[gpuid]; /* loop over 'large' trsm's */ for(i=0; i<trsm_count; i++) { j = i % CHOLMOD_DEVICE_STREAMS; k = i / CHOLMOD_DEVICE_STREAMS / Common->numGPU % Common->numGPU_subtree_parallel; //vgpuid = gpuid * Common->numGPU_parallel + i / CHOLMOD_DEVICE_STREAMS % Common->numGPU_parallel; vgpuid = k * Common->numGPU + gpuid * Common->numGPU_parallel + i / CHOLMOD_DEVICE_STREAMS % Common->numGPU_parallel; /* set cublas stream */ cublasStatus = cublasSetStream (Common->cublasHandle[gpuid], Common->gpuStream[vgpuid][j]) ; if (cublasStatus != CUBLAS_STATUS_SUCCESS) { ERROR (CHOLMOD_GPU_PROBLEM, "GPU cublasSetStream failure"); } if (h_trsm->m[i] > 0) /* trsm on cuBlas */ cublasDtrsm ( Common->cublasHandle[gpuid], CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT, h_trsm->m[i], h_trsm->n[i], &alpha, h_trsm->A[i], h_trsm->lda[i], h_trsm->B[i], h_trsm->ldb[i]); } /* check if any trsm's left for batching */ if( (nbatch - trsm_count) > 0 ) { vgpuid = gpuid * Common->numGPU_parallel; /* trsm on custom batched kernels */ dtrsm_custom_simple_1block_batch ( Common->gpuStream[vgpuid][CHOLMOD_DEVICE_STREAMS], CUBLAS_SIDE_RIGHT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT, &d_trsm->m[trsm_count], &d_trsm->n[trsm_count], &alpha, &d_trsm->A[trsm_count], &d_trsm->lda[trsm_count], &d_trsm->B[trsm_count], &d_trsm->ldb[trsm_count], nbatch-trsm_count); } /* synchronize streams */ for(i = 0; i < CHOLMOD_DEVICE_STREAMS; i++){ for (k = 0; k < Common->numGPU_subtree_parallel; k++) { for (vgpuid = k * Common->numGPU + gpuid * Common->numGPU_parallel; vgpuid < k * Common->numGPU + (gpuid+1) * Common->numGPU_parallel; vgpuid++) { cudaStat = cudaStreamSynchronize (Common->gpuStream[vgpuid][i]) ; if (cudaStat) { printf("error: %s\n",cudaGetErrorString(cudaStat)); ERROR (CHOLMOD_GPU_PROBLEM, "GPU dtrsm_custom_simple_1block_batch") ; } } } } cudaStat = cudaStreamSynchronize (Common->gpuStream[gpuid * Common->numGPU_parallel][CHOLMOD_DEVICE_STREAMS]) ; TIMER_END1(tstart1,trsm_time,0); TIMER_END1(tstart1,trsm_time,1); } /* * Function: * gpu_copy_supernode2 * * Description: * Copies batch of supernodes from device to pinned memory. * Performs zero-copies. 
* */ void TEMPLATE2 (CHOLMOD (gpu_copy_supernode2)) ( cholmod_common *Common, cholmod_global_pointers *gb_p, cholmod_gpu_pointers *gpu_p, /* device pointers */ int nbatch, /* batch size (# supernodes) */ Int maxnsrownscol, /* max nsrow*nscol in batch */ int gpuid /* gpu id */ ) { /* local variables */ int k; struct cholmod_super_ptrs_t *h_super, *d_super; cudaError_t cudaStat ; /* set dimensions */ h_super = &gpu_p->h_super[gpuid]; d_super = &gpu_p->d_super[gpuid]; /* * Perform copy factor * Copies a batch of supernodes from * device to pinned memory. */ #if 1 //#pragma omp parallel for num_threads(Common->ompNumThreads) if (nbatch > 32) for (k = 0; k < nbatch; k++) { cudaMemcpyAsync ( gpu_p->h_pLx[gpuid] + h_super->psx[k], gpu_p->d_Lx[gpuid] + h_super->psx[k], sizeof(double) * h_super->nscol[k] * h_super->nsrow[k], cudaMemcpyDeviceToHost, Common->gpuStream[gpuid * Common->numGPU_parallel][CHOLMOD_DEVICE_STREAMS+1]); } #else copyLx_small ( gpu_p->h_pLx[gpuid], gpu_p->d_Lx[gpuid], d_super->psx, d_super->nsrow, d_super->nscol, nbatch, maxnsrownscol, &Common->gpuStream[gpuid * Common->numGPU_parallel][CHOLMOD_DEVICE_STREAMS+1]); #endif cudaEventRecord (Common->updateCCopybackBuffered[gpuid * Common->numGPU_parallel], Common->gpuStream[gpuid * Common->numGPU_parallel][CHOLMOD_DEVICE_STREAMS+1]); } /* * Function: * gpu_copy_supernode * * Description: * copies batch of supernodes from pinned to regular memory * Performs two tasks: * 1. copy supernodes up to last level (flag=1) * 2. copy supernodes in last level (flag=2) * * Note: * in second case (flag=2) better to set omp threads across * work over single supernode, as there are few (generally * only one) supernodes in the final level. * */ void TEMPLATE2 (CHOLMOD (gpu_copy_supernode)) ( cholmod_common *Common, cholmod_gpu_pointers *gpu_p, /* device pointers */ cholmod_cpu_pointers *cpu_p, cholmod_tree_pointers *tree_p, int subtree, /* subtree */ int level, /* level */ int gpuid, /* gpu id */ int flag, /* flag (1: copy up to last level, 2: copy last level) */ Int start, /* first supernode in level */ Int end, /* last supernode in level */ Int *LpxSub, /* sub-factor in pinned memory */ Int *Lpx ) { /* local variables */ Int i, j, s, nscol, nsrow, offset1, offset2; cudaError_t cudaStat ; int numThreads; Int start_local, end_local; numThreads = Common->ompNumThreads; cudaEventSynchronize(Common->updateCCopybackBuffered[gpuid * Common->numGPU_parallel]); /* case 1: copy all supernodes up to last level */ if(flag==1) { start_local = tree_p->supernode_levels_ptrs[tree_p->supernode_levels_subtree_ptrs[subtree]]; end_local = tree_p->supernode_levels_ptrs[tree_p->supernode_levels_subtree_ptrs[subtree]+tree_p->supernode_num_levels[subtree]]; } /* end case 1*/ /* case 2: copy all supernodes in last level */ if(flag==2) { start_local = start; end_local = end; } /* loop over supernodes in last level */ #pragma omp parallel for num_threads(numThreads) schedule(static,1) private (i, j, s, nscol, nsrow, offset1, offset2) if (end_local - start_local >= numThreads) for(i = 0; i < (end_local - start_local); i++) { s = tree_p->supernode_levels[start_local + i]; nscol = cpu_p->Super [s+1] - cpu_p->Super [s] ; nsrow = cpu_p->Lpi[s+1] - cpu_p->Lpi[s] ; const Int ssize = nscol * nsrow; offset1 = ((Int*)Lpx)[s]; offset2 = LpxSub[s]; double* pLx = &(cpu_p->Lx[offset1]); const double* phLx = &(gpu_p->h_Lx[gpuid][offset2]); /* copy supernode - split single supernode amongst omp threads */ #pragma omp parallel for num_threads(numThreads) private(j) if (end_local - 
start_local < numThreads) for(j = 0; j < ssize; j++) { pLx[j] = phLx[j]; } } /* end loop over supernodes */ } #endif /* #undef REAL #undef COMPLEX #undef ZOMPLEX */
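/*
 * Illustrative sketch (not part of CHOLMOD): gpu_triangular_solve_batch above
 * distributes the 'large' trsm calls round-robin over device streams and
 * virtual GPU ids.  The stand-alone program below reproduces only that index
 * arithmetic so the mapping can be inspected; the parameter values (streams,
 * numGPU, and so on) are hypothetical and chosen purely for printing.
 */
#include <stdio.h>

#define CHOLMOD_DEVICE_STREAMS 4   /* hypothetical value, for illustration only */

int main (void)
{
    const int numGPU = 2 ;                  /* hypothetical */
    const int numGPU_parallel = 2 ;         /* hypothetical */
    const int numGPU_subtree_parallel = 2 ; /* hypothetical */
    const int gpuid = 0 ;                   /* physical gpu handling the batch */
    const int trsm_count = 16 ;             /* # of 'large' trsm calls */

    for (int i = 0 ; i < trsm_count ; i++)
    {
        /* same arithmetic as in gpu_triangular_solve_batch above */
        int j = i % CHOLMOD_DEVICE_STREAMS ;
        int k = i / CHOLMOD_DEVICE_STREAMS / numGPU % numGPU_subtree_parallel ;
        int vgpuid = k * numGPU + gpuid * numGPU_parallel
                   + i / CHOLMOD_DEVICE_STREAMS % numGPU_parallel ;
        printf ("trsm %2d -> stream %d of virtual gpu %d\n", i, j, vgpuid) ;
    }
    return (0) ;
}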
channel.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC H H AAA N N N N EEEEE L % % C H H A A NN N NN N E L % % C HHHHH AAAAA N N N N N N EEE L % % C H H A A N NN N NN E L % % CCCC H H A A N N N N EEEEE LLLLL % % % % % % MagickCore Image Channel Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/cache-private.h" #include "MagickCore/channel.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/enhance.h" #include "MagickCore/image.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a n n e l F x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ChannelFxImage() applies a channel expression to the specified image. The % expression consists of one or more channels, either mnemonic or numeric (e.g. % red, 1), separated by actions as follows: % % <=> exchange two channels (e.g. red<=>blue) % => copy one channel to another channel (e.g. red=>green) % = assign a constant value to a channel (e.g. red=50%) % , write new image channels in the specified order (e.g. red, green) % | add a new output image for the next set of channel operations % ; move to the next input image for the source of channel data % % For example, to create 3 grayscale images from the red, green, and blue % channels of an image, use: % % -channel-fx "red; green; blue" % % A channel without an operation symbol implies separate (i.e, semicolon). % % The format of the ChannelFxImage method is: % % Image *ChannelFxImage(const Image *image,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o expression: A channel expression. % % o exception: return any errors or warnings in this structure. 
% */ typedef enum { ExtractChannelOp, AssignChannelOp, ExchangeChannelOp, TransferChannelOp } ChannelFx; static MagickBooleanType ChannelImage(Image *destination_image, const PixelChannel destination_channel,const ChannelFx channel_op, const Image *source_image,const PixelChannel source_channel, const Quantum pixel,ExceptionInfo *exception) { CacheView *source_view, *destination_view; MagickBooleanType status; size_t height, width; ssize_t y; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); destination_view=AcquireAuthenticCacheView(destination_image,exception); height=MagickMin(source_image->rows,destination_image->rows); width=MagickMin(source_image->columns,destination_image->columns); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,source_image,height,1) #endif for (y=0; y < (ssize_t) height; y++) { PixelTrait destination_traits, source_traits; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(destination_view,0,y, destination_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } destination_traits=GetPixelChannelTraits(destination_image, destination_channel); source_traits=GetPixelChannelTraits(source_image,source_channel); if ((destination_traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; for (x=0; x < (ssize_t) width; x++) { if (channel_op == AssignChannelOp) SetPixelChannel(destination_image,destination_channel,pixel,q); else SetPixelChannel(destination_image,destination_channel, GetPixelChannel(source_image,source_channel,p),q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(destination_image); } if (SyncCacheViewAuthenticPixels(destination_view,exception) == MagickFalse) status=MagickFalse; } destination_view=DestroyCacheView(destination_view); source_view=DestroyCacheView(source_view); return(status); } MagickExport Image *ChannelFxImage(const Image *image,const char *expression, ExceptionInfo *exception) { #define ChannelFxImageTag "ChannelFx/Image" ChannelFx channel_op; ChannelType channel_mask; char token[MagickPathExtent]; const char *p; const Image *source_image; double pixel; Image *destination_image; MagickBooleanType status; PixelChannel source_channel, destination_channel; ssize_t channels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); source_image=image; destination_image=CloneImage(source_image,0,0,MagickTrue,exception); if (destination_image == (Image *) NULL) return((Image *) NULL); if (expression == (const char *) NULL) return(destination_image); status=SetImageStorageClass(destination_image,DirectClass,exception); if (status == MagickFalse) { destination_image=GetLastImageInList(destination_image); return((Image *) NULL); } destination_channel=RedPixelChannel; channel_mask=UndefinedChannel; pixel=0.0; p=(char *) expression; GetNextToken(p,&p,MagickPathExtent,token); channel_op=ExtractChannelOp; for (channels=0; *token != '\0'; ) { ssize_t i; /* Interpret channel expression. 
*/ switch (*token) { case ',': { GetNextToken(p,&p,MagickPathExtent,token); break; } case '|': { if (GetNextImageInList(source_image) != (Image *) NULL) source_image=GetNextImageInList(source_image); else source_image=GetFirstImageInList(source_image); GetNextToken(p,&p,MagickPathExtent,token); break; } case ';': { Image *canvas; (void) SetPixelChannelMask(destination_image,channel_mask); if ((channel_op == ExtractChannelOp) && (channels == 1)) { (void) SetPixelMetaChannels(destination_image,0,exception); (void) SetImageColorspace(destination_image,GRAYColorspace, exception); } canvas=CloneImage(source_image,0,0,MagickTrue,exception); if (canvas == (Image *) NULL) { destination_image=DestroyImageList(destination_image); return(destination_image); } AppendImageToList(&destination_image,canvas); destination_image=GetLastImageInList(destination_image); status=SetImageStorageClass(destination_image,DirectClass,exception); if (status == MagickFalse) { destination_image=GetLastImageInList(destination_image); return((Image *) NULL); } GetNextToken(p,&p,MagickPathExtent,token); channels=0; destination_channel=RedPixelChannel; channel_mask=UndefinedChannel; break; } default: break; } i=ParsePixelChannelOption(token); if (i < 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnrecognizedChannelType","`%s'",token); destination_image=DestroyImageList(destination_image); return(destination_image); } source_channel=(PixelChannel) i; channel_op=ExtractChannelOp; GetNextToken(p,&p,MagickPathExtent,token); if (*token == '<') { channel_op=ExchangeChannelOp; GetNextToken(p,&p,MagickPathExtent,token); } if (*token == '=') { if (channel_op != ExchangeChannelOp) channel_op=AssignChannelOp; GetNextToken(p,&p,MagickPathExtent,token); } if (*token == '>') { if (channel_op != ExchangeChannelOp) channel_op=TransferChannelOp; GetNextToken(p,&p,MagickPathExtent,token); } switch (channel_op) { case AssignChannelOp: case ExchangeChannelOp: case TransferChannelOp: { if (channel_op == AssignChannelOp) pixel=StringToDoubleInterval(token,(double) QuantumRange+1.0); else { i=ParsePixelChannelOption(token); if (i < 0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnrecognizedChannelType","`%s'",token); destination_image=DestroyImageList(destination_image); return(destination_image); } } destination_channel=(PixelChannel) i; if (i >= (ssize_t) GetPixelChannels(destination_image)) (void) SetPixelMetaChannels(destination_image,(size_t) ( destination_channel-GetPixelChannels(destination_image)+1), exception); if (image->colorspace != UndefinedColorspace) switch (destination_channel) { case RedPixelChannel: case GreenPixelChannel: case BluePixelChannel: case BlackPixelChannel: case IndexPixelChannel: break; case AlphaPixelChannel: { destination_image->alpha_trait=BlendPixelTrait; break; } case CompositeMaskPixelChannel: { destination_image->channels=(ChannelType) (destination_image->channels | CompositeMaskChannel); break; } case ReadMaskPixelChannel: { destination_image->channels=(ChannelType) (destination_image->channels | ReadMaskChannel); break; } case WriteMaskPixelChannel: { destination_image->channels=(ChannelType) (destination_image->channels | WriteMaskChannel); break; } case MetaPixelChannel: default: { (void) SetPixelMetaChannels(destination_image,(size_t) ( destination_channel-GetPixelChannels(destination_image)+1), exception); break; } } channel_mask=(ChannelType) (channel_mask | ParseChannelOption(token)); if (((channels >= 1) || (destination_channel >= 1)) && 
(IsGrayColorspace(destination_image->colorspace) != MagickFalse)) (void) SetImageColorspace(destination_image,sRGBColorspace,exception); GetNextToken(p,&p,MagickPathExtent,token); break; } default: break; } status=ChannelImage(destination_image,destination_channel,channel_op, source_image,source_channel,ClampToQuantum(pixel),exception); if (status == MagickFalse) { destination_image=DestroyImageList(destination_image); break; } channels++; if (channel_op == ExchangeChannelOp) { status=ChannelImage(destination_image,source_channel,channel_op, source_image,destination_channel,ClampToQuantum(pixel),exception); if (status == MagickFalse) { destination_image=DestroyImageList(destination_image); break; } channels++; } switch (channel_op) { case ExtractChannelOp: { channel_mask=(ChannelType) (channel_mask | (1UL << destination_channel)); destination_channel=(PixelChannel) (destination_channel+1); break; } default: break; } status=SetImageProgress(source_image,ChannelFxImageTag,p-expression, strlen(expression)); if (status == MagickFalse) break; } (void) SetPixelChannelMask(destination_image,channel_mask); if ((channel_op == ExtractChannelOp) && (channels == 1)) { (void) SetPixelMetaChannels(destination_image,0,exception); (void) SetImageColorspace(destination_image,GRAYColorspace,exception); } return(GetFirstImageInList(destination_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m b i n e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CombineImages() combines one or more images into a single image. The % grayscale value of the pixels of each image in the sequence is assigned in % order to the specified channels of the combined image. The typical % ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc. % % The format of the CombineImages method is: % % Image *CombineImages(const Image *images,const ColorspaceType colorspace, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o colorspace: the image colorspace. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CombineImages(const Image *image, const ColorspaceType colorspace,ExceptionInfo *exception) { #define CombineImageTag "Combine/Image" CacheView *combine_view; Image *combine_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Ensure the image are the same size. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); combine_image=CloneImage(image,0,0,MagickTrue,exception); if (combine_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(combine_image,DirectClass,exception) == MagickFalse) { combine_image=DestroyImage(combine_image); return((Image *) NULL); } if (colorspace != UndefinedColorspace) (void) SetImageColorspace(combine_image,colorspace,exception); else if (fabs(image->gamma-1.0) <= MagickEpsilon) (void) SetImageColorspace(combine_image,RGBColorspace,exception); else (void) SetImageColorspace(combine_image,sRGBColorspace,exception); switch (combine_image->colorspace) { case UndefinedColorspace: case sRGBColorspace: { if (GetImageListLength(image) > 3) combine_image->alpha_trait=BlendPixelTrait; break; } case LinearGRAYColorspace: case GRAYColorspace: { if (GetImageListLength(image) > 1) combine_image->alpha_trait=BlendPixelTrait; break; } case CMYKColorspace: { if (GetImageListLength(image) > 4) combine_image->alpha_trait=BlendPixelTrait; break; } default: break; } /* Combine images. */ status=MagickTrue; progress=0; combine_view=AcquireAuthenticCacheView(combine_image,exception); for (y=0; y < (ssize_t) combine_image->rows; y++) { CacheView *image_view; const Image *next; Quantum *pixels; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t i; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns, 1,exception); if (pixels == (Quantum *) NULL) { status=MagickFalse; continue; } next=image; for (i=0; i < (ssize_t) GetPixelChannels(combine_image); i++) { register ssize_t x; PixelChannel channel = GetPixelChannelChannel(combine_image,i); PixelTrait traits = GetPixelChannelTraits(combine_image,channel); if (traits == UndefinedPixelTrait) continue; if (next == (Image *) NULL) continue; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const Quantum *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { if (x < (ssize_t) next->columns) { q[i]=GetPixelGray(next,p); p+=GetPixelChannels(next); } q+=GetPixelChannels(combine_image); } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CombineImageTag,progress++, combine_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } combine_view=DestroyCacheView(combine_view); if (status == MagickFalse) combine_image=DestroyImage(combine_image); return(combine_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e A l p h a C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageAlphaChannel() returns MagickFalse if the image alpha channel is % not activated. That is, the image is RGB rather than RGBA or CMYK rather % than CMYKA. 
% % The format of the GetImageAlphaChannel method is: % % MagickBooleanType GetImageAlphaChannel(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image) { assert(image != (const Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); return(image->alpha_trait != UndefinedPixelTrait ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImage() separates a channel from the image and returns it as a % grayscale image. % % The format of the SeparateImage method is: % % Image *SeparateImage(const Image *image,const ChannelType channel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the image channel. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SeparateImage(const Image *image, const ChannelType channel_type,ExceptionInfo *exception) { #define GetChannelBit(mask,bit) (((size_t) (mask) >> (size_t) (bit)) & 0x01) #define SeparateImageTag "Separate/Image" CacheView *image_view, *separate_view; Image *separate_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Initialize separate image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); separate_image=CloneImage(image,0,0,MagickTrue,exception); if (separate_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(separate_image,DirectClass,exception) == MagickFalse) { separate_image=DestroyImage(separate_image); return((Image *) NULL); } separate_image->alpha_trait=UndefinedPixelTrait; (void) SetImageColorspace(separate_image,GRAYColorspace,exception); separate_image->gamma=image->gamma; /* Separate image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); separate_view=AcquireAuthenticCacheView(separate_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(separate_view,0,y,separate_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; SetPixelChannel(separate_image,GrayPixelChannel,(Quantum) 0,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (GetChannelBit(channel_type,channel) == 0)) continue; SetPixelChannel(separate_image,GrayPixelChannel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(separate_image); } if (SyncCacheViewAuthenticPixels(separate_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SeparateImage) #endif proceed=SetImageProgress(image,SeparateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } separate_view=DestroyCacheView(separate_view); image_view=DestroyCacheView(image_view); (void) SetImageChannelMask(separate_image,DefaultChannels); if (status == MagickFalse) separate_image=DestroyImage(separate_image); return(separate_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImages() returns a separate grayscale image for each channel % specified. % % The format of the SeparateImages method is: % % Image *SeparateImages(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *SeparateImages(const Image *image,ExceptionInfo *exception) { Image *images, *separate_image; register ssize_t i; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); images=NewImageList(); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0)) continue; separate_image=SeparateImage(image,(ChannelType) (1UL << channel), exception); if (separate_image != (Image *) NULL) AppendImageToList(&images,separate_image); } if (images == (Image *) NULL) images=SeparateImage(image,UndefinedChannel,exception); return(images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e A l p h a C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha % channel. % % The format of the SetImageAlphaChannel method is: % % MagickBooleanType SetImageAlphaChannel(Image *image, % const AlphaChannelOption alpha_type,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o alpha_type: The alpha channel type: ActivateAlphaChannel, % AssociateAlphaChannel, CopyAlphaChannel, DeactivateAlphaChannel, % DisassociateAlphaChannel, ExtractAlphaChannel, OffAlphaChannel, % OnAlphaChannel, OpaqueAlphaChannel, SetAlphaChannel, ShapeAlphaChannel, % and TransparentAlphaChannel. % % o exception: return any errors or warnings in this structure. % */ static inline void FlattenPixelInfo(const Image *image,const PixelInfo *p, const double alpha,const Quantum *q,const double beta, Quantum *composite) { double Da, gamma, Sa; register ssize_t i; /* Compose pixel p over pixel q with the given alpha. */ Sa=QuantumScale*alpha; Da=QuantumScale*beta, gamma=Sa*(-Da)+Sa+Da; gamma=PerceptibleReciprocal(gamma); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; switch (channel) { case RedPixelChannel: { composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta, (double) p->red,alpha)); break; } case GreenPixelChannel: { composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta, (double) p->green,alpha)); break; } case BluePixelChannel: { composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta, (double) p->blue,alpha)); break; } case BlackPixelChannel: { composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta, (double) p->black,alpha)); break; } case AlphaPixelChannel: { composite[i]=ClampToQuantum(QuantumRange*(Sa*(-Da)+Sa+Da)); break; } default: break; } } } MagickExport MagickBooleanType SetImageAlphaChannel(Image *image, const AlphaChannelOption alpha_type,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); status=MagickTrue; switch (alpha_type) { case ActivateAlphaChannel: { image->alpha_trait=BlendPixelTrait; break; } case AssociateAlphaChannel: { /* Associate alpha. 
*/ status=SetImageStorageClass(image,DirectClass,exception); if (status == MagickFalse) break; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; register ssize_t i; gamma=QuantumScale*GetPixelAlpha(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (channel == AlphaPixelChannel) continue; if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(gamma*q[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->alpha_trait=CopyPixelTrait; return(status); } case BackgroundAlphaChannel: { /* Set transparent pixels to background color. */ if (image->alpha_trait == UndefinedPixelTrait) break; status=SetImageStorageClass(image,DirectClass,exception); if (status == MagickFalse) break; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelAlpha(image,q) == TransparentAlpha) { SetPixelViaPixelInfo(image,&image->background_color,q); SetPixelChannel(image,AlphaPixelChannel,TransparentAlpha,q); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } case CopyAlphaChannel: { image->alpha_trait=UpdatePixelTrait; status=CompositeImage(image,image,IntensityCompositeOp,MagickTrue,0,0, exception); break; } case DeactivateAlphaChannel: { if (image->alpha_trait == UndefinedPixelTrait) status=SetImageAlpha(image,OpaqueAlpha,exception); image->alpha_trait=CopyPixelTrait; break; } case DisassociateAlphaChannel: { /* Disassociate alpha. 
*/ status=SetImageStorageClass(image,DirectClass,exception); if (status == MagickFalse) break; image->alpha_trait=BlendPixelTrait; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma, Sa; register ssize_t i; Sa=QuantumScale*GetPixelAlpha(image,q); gamma=PerceptibleReciprocal(Sa); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (channel == AlphaPixelChannel) continue; if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(gamma*q[i]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->alpha_trait=UndefinedPixelTrait; return(status); } case DiscreteAlphaChannel: { if (image->alpha_trait == UndefinedPixelTrait) status=SetImageAlpha(image,OpaqueAlpha,exception); image->alpha_trait=UpdatePixelTrait; break; } case ExtractAlphaChannel: { status=CompositeImage(image,image,AlphaCompositeOp,MagickTrue,0,0, exception); image->alpha_trait=UndefinedPixelTrait; break; } case OffAlphaChannel: { image->alpha_trait=UndefinedPixelTrait; break; } case OnAlphaChannel: { if (image->alpha_trait == UndefinedPixelTrait) status=SetImageAlpha(image,OpaqueAlpha,exception); image->alpha_trait=BlendPixelTrait; break; } case OpaqueAlphaChannel: { status=SetImageAlpha(image,OpaqueAlpha,exception); break; } case RemoveAlphaChannel: { /* Remove transparency. */ if (image->alpha_trait == UndefinedPixelTrait) break; status=SetImageStorageClass(image,DirectClass,exception); if (status == MagickFalse) break; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { FlattenPixelInfo(image,&image->background_color, image->background_color.alpha,q,(double) GetPixelAlpha(image,q),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->alpha_trait=image->background_color.alpha_trait; break; } case SetAlphaChannel: { if (image->alpha_trait == UndefinedPixelTrait) status=SetImageAlpha(image,OpaqueAlpha,exception); break; } case ShapeAlphaChannel: { /* Set alpha channel by shape. 
*/ status=SetImageStorageClass(image,DirectClass,exception); if (status == MagickFalse) break; image->alpha_trait=UpdatePixelTrait; (void) SetImageMask(image,WritePixelMask,image,exception); (void) LevelImageColors(image,&image->background_color, &image->background_color,MagickTrue,exception); (void) SetImageMask(image,WritePixelMask,(Image *) NULL,exception); break; } case TransparentAlphaChannel: { status=SetImageAlpha(image,TransparentAlpha,exception); break; } case UndefinedAlphaChannel: break; } if (status == MagickFalse) return(status); (void) SetPixelChannelMask(image,image->channel_mask); return(SyncImagePixelCache(image,exception)); }
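/*
 * Usage sketch (not part of MagickCore): ChannelFxImage() above interprets a
 * channel expression such as the "red; green; blue" example given in its
 * documentation.  The hypothetical helper below applies that expression to an
 * already-acquired image; it assumes the MagickCore umbrella header is
 * available, that MagickCoreGenesis() has been called, and that the caller
 * owns `image` and `exception`.
 */
#include "MagickCore/MagickCore.h"

static Image *ExtractRGBAsGrayscale(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Following the example in the documentation above, this expression yields
    a three-image list: one grayscale image per channel (red, green, blue).
  */
  return(ChannelFxImage(image,"red; green; blue",exception));
}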
GB_unaryop__ainv_int8_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_int8_int32 // op(A') function: GB_tran__ainv_int8_int32 // C type: int8_t // A type: int32_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = -aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, aij) \ int8_t z = (int8_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_int8_int32 ( int8_t *Cx, // Cx and Ax may be aliased int32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_int8_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
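/*
 * Illustrative sketch (not part of GraphBLAS): GB_unop__ainv_int8_int32 above
 * casts each entry to int8_t and negates it, i.e. Cx [p] = -((int8_t) Ax [p]),
 * in parallel over p.  Expanding the GB_CAST_OP macro by hand for a small
 * array gives the loop below; the array contents are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int main (void)
{
    int32_t Ax [4] = { 1, -5, 100, 130 } ;  /* hypothetical input; 130 wraps under the int8_t cast */
    int8_t  Cx [4] ;
    for (int64_t p = 0 ; p < 4 ; p++)
    {
        int32_t aij = Ax [p] ;          /* GB_GETA    */
        int8_t  z   = (int8_t) aij ;    /* GB_CASTING */
        Cx [p] = -z ;                   /* GB_OP      */
    }
    for (int p = 0 ; p < 4 ; p++) printf ("%d\n", (int) Cx [p]) ;
    return (0) ;
}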
DRB060-matrixmultiply-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: [email protected], [email protected], [email protected], [email protected], [email protected]) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Classic i-k-j matrix multiplication */ #include "omprace.h" #include <omp.h> #define N 100 #define M 100 #define K 100 double a[N][M],b[M][K],c[N][K]; int mmm() { int i,j,k; #pragma omp parallel for private(j,k) for (i = 0; i < N; i++) for (k = 0; k < K; k++) for (j = 0; j < M; j++) c[i][j]= c[i][j]+a[i][k]*b[k][j]; return 0; } int main() { omprace_init(); mmm(); omprace_fini(); return 0; }
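/*
 * Note (not part of DataRaceBench): the i-k-j loop above is race free because
 * the parallel index i partitions the rows of c, so no two threads write the
 * same c[i][j]; j and k are listed as private because they are declared at
 * function scope in mmm().  The stand-alone sketch below, with hypothetical
 * initialization, checks the same parallel kernel against a serial reference;
 * its loop counters are block-scoped, so no private clause is needed.
 */
#include <stdio.h>
#include <math.h>
#include <omp.h>

#define N 64
static double a[N][N], b[N][N], c[N][N], ref[N][N];

int main(void)
{
  for (int i = 0; i < N; i++)
    for (int j = 0; j < N; j++) { a[i][j] = i + j; b[i][j] = i - j; }

  /* parallel i-k-j product, same structure as mmm() above */
  #pragma omp parallel for
  for (int i = 0; i < N; i++)
    for (int k = 0; k < N; k++)
      for (int j = 0; j < N; j++)
        c[i][j] += a[i][k] * b[k][j];

  /* serial reference */
  for (int i = 0; i < N; i++)
    for (int j = 0; j < N; j++)
      for (int k = 0; k < N; k++)
        ref[i][j] += a[i][k] * b[k][j];

  double maxdiff = 0.0;
  for (int i = 0; i < N; i++)
    for (int j = 0; j < N; j++)
      maxdiff = fmax(maxdiff, fabs(c[i][j] - ref[i][j]));
  printf("max |c - ref| = %g\n", maxdiff);
  return 0;
}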
GB_unaryop__minv_uint64_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_uint64_int32 // op(A') function: GB_tran__minv_uint64_int32 // C type: uint64_t // A type: int32_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 64) #define GB_ATYPE \ int32_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 64) ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_uint64_int32 ( uint64_t *restrict Cx, const int32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_uint64_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unaryop__abs_int32_uint8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int32_uint8 // op(A') function: GB_tran__abs_int32_uint8 // C type: int32_t // A type: uint8_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ uint8_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int32_t z = (int32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int32_uint8 ( int32_t *restrict Cx, const uint8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int32_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
server.c
// C99
// Start program: mpirun -np 1 server
#include <mpi.h>
#include <omp.h>
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h> // needed for sleep() on POSIX system

#define MAX_DATA 100

int main( int argc, char **argv )
{
  int providedThreadSupport;
  bool terminateListening = false;
  char portName[MPI_MAX_PORT_NAME];

  MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &providedThreadSupport);
  if (MPI_THREAD_MULTIPLE != providedThreadSupport) {
    printf( "Requested MPI thread support is not guaranteed.\n");
  }

  MPI_Open_port(MPI_INFO_NULL, portName);
  printf("Server available at port:%s\n", portName);

  #pragma omp parallel num_threads(2) shared(portName,terminateListening)
  {
    // Use OpenMP sections construct for function parallelism
    #pragma omp sections
    {
      #pragma omp section
      {
        // Do some work
        sleep(15);
        // Connect to yourself in order to terminate listening
        terminateListening = true;
        MPI_Comm dummy;
        MPI_Comm_connect(portName, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &dummy);
        printf("Server is connected to itself.\n");
        MPI_Comm_disconnect(&dummy);
        printf("Server is disconnected.\n");
        MPI_Close_port(portName);
      }
      #pragma omp section
      {
        // Listening section
        while (1) {
          MPI_Comm interClient = MPI_COMM_NULL;
          MPI_Comm_accept(portName, MPI_INFO_NULL, 0, MPI_COMM_WORLD, &interClient);
          if (terminateListening == true) {
            break;
          }
          MPI_Status status;
          char clientName[MAX_DATA];
          MPI_Recv(clientName, MAX_DATA, MPI_CHAR, MPI_ANY_SOURCE, MPI_ANY_TAG,
                   interClient, &status);
          printf("Client is connected with name: %s\n", clientName);
          MPI_Comm_disconnect(&interClient);
          printf("Client is disconnected.\n");
        }
      }
    } // End of sections
  } // End of parallel section

  MPI_Finalize();
  return (0);
}
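/*
 * Companion sketch (not part of the server above): a minimal client that
 * connects to the published port and sends its name, which is what the
 * MPI_Recv in the listening section expects.  It assumes the port string
 * printed by the server is passed verbatim as argv[1], e.g.
 *   mpirun -np 1 client "<port name>"
 * The client name used here is hypothetical.
 */
#include <mpi.h>
#include <stdio.h>
#include <string.h>

#define MAX_DATA 100

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  if (argc < 2) {
    fprintf(stderr, "usage: client <port name>\n");
    MPI_Finalize();
    return 1;
  }

  MPI_Comm server;
  MPI_Comm_connect(argv[1], MPI_INFO_NULL, 0, MPI_COMM_WORLD, &server);

  char clientName[MAX_DATA] = "example-client";   /* hypothetical name */
  MPI_Send(clientName, (int) strlen(clientName) + 1, MPI_CHAR, 0, 0, server);

  MPI_Comm_disconnect(&server);
  MPI_Finalize();
  return 0;
}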
pi.c
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <time.h>
#include <math.h>

static long MULTIPLIER = 1366;
static long ADDEND = 150889;
static long PMOD = 714025;
long random_last = 0;
double random_low, random_hi;

#define r 1

/* Function used to generate pseudo-random numbers in [0,1].
   Note: it builds its seed from clock() on every call and keeps no state
   between calls. */
double randomi()
{
  clock_t t;
  t = clock();
  double tc;
  tc = (((float)t)/CLOCKS_PER_SEC);
  double X = fmod(((3*t)+(3/tc)),(3/tc)); /* Seed */
  double M = 2147483648, n;
  int a = 1103515245, c = 12345, i;
  for (i = 1; i < 1000; i++)
  {
    X = fmod((a*X+c),M);  // Linear congruence
    n = (X/(M-1));
    double te = (3*3/2)+(0.017453293*(n)*pow(-1,(int) (n*10))); // computed but unused
  }
  return n;
}

//
// The monte carlo pi program
//
int main (int argc, char *argv[])
{
  long num_trials = atol(argv[1]);
  long i;
  long Ncirc = 0;
  double pi;
  //double r = 1.0; // radius of circle. Side of square is 2*r

  #pragma omp parallel for private(i) reduction(+:Ncirc)
  for (i = 0; i < num_trials; i++)
  {
    double x = randomi();
    double y = randomi();
    double test = x*x + y*y;
    if (test <= r*r) Ncirc++;
  }

  pi = 4.0 * ((double)Ncirc/(double)num_trials);
  printf("\n %ld trials, pi is %f \n", num_trials, pi);
  return 0;
}
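/*
 * Alternative sketch (not the generator used above): randomi() re-seeds from
 * clock() on every call and is shared by all threads.  A common variant keeps
 * one seed per thread with the POSIX rand_r(), which keeps the reduction loop
 * race free; the trial count and seed offset below are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(void)
{
  const long num_trials = 1000000;   /* hypothetical trial count */
  long Ncirc = 0;

  #pragma omp parallel reduction(+:Ncirc)
  {
    /* one seed per thread, offset by the thread id */
    unsigned int seed = 1234u + (unsigned int) omp_get_thread_num();
    #pragma omp for
    for (long i = 0; i < num_trials; i++) {
      double x = (double) rand_r(&seed) / RAND_MAX;
      double y = (double) rand_r(&seed) / RAND_MAX;
      if (x * x + y * y <= 1.0) Ncirc++;
    }
  }

  printf("pi estimate = %f\n", 4.0 * (double) Ncirc / (double) num_trials);
  return 0;
}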
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. /// class alignas(void *) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: friend class ASTStmtReader; friend class ASTStmtWriter; void *operator new(size_t bytes) noexcept { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) noexcept { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } //===--- Statement bitfields classes ---===// class StmtBitfields { friend class Stmt; /// The statement class. unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class NullStmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class NullStmt; unsigned : NumStmtBits; /// True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode unsigned HasLeadingEmptyMacro : 1; /// The location of the semi-colon. SourceLocation SemiLoc; }; class CompoundStmtBitfields { friend class ASTStmtReader; friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; /// The location of the opening "{". SourceLocation LBraceLoc; }; class LabelStmtBitfields { friend class LabelStmt; unsigned : NumStmtBits; SourceLocation IdentLoc; }; class AttributedStmtBitfields { friend class ASTStmtReader; friend class AttributedStmt; unsigned : NumStmtBits; /// Number of attributes. unsigned NumAttrs : 32 - NumStmtBits; /// The location of the attribute. 
SourceLocation AttrLoc; }; class IfStmtBitfields { friend class ASTStmtReader; friend class IfStmt; unsigned : NumStmtBits; /// True if this if statement is a constexpr if. unsigned IsConstexpr : 1; /// True if this if statement has storage for an else statement. unsigned HasElse : 1; /// True if this if statement has storage for a variable declaration. unsigned HasVar : 1; /// True if this if statement has storage for an init statement. unsigned HasInit : 1; /// The location of the "if". SourceLocation IfLoc; }; class SwitchStmtBitfields { friend class SwitchStmt; unsigned : NumStmtBits; /// True if the SwitchStmt has storage for an init statement. unsigned HasInit : 1; /// True if the SwitchStmt has storage for a condition variable. unsigned HasVar : 1; /// If the SwitchStmt is a switch on an enum value, records whether all /// the enum values were covered by CaseStmts. The coverage information /// value is meant to be a hint for possible clients. unsigned AllEnumCasesCovered : 1; /// The location of the "switch". SourceLocation SwitchLoc; }; class WhileStmtBitfields { friend class ASTStmtReader; friend class WhileStmt; unsigned : NumStmtBits; /// True if the WhileStmt has storage for a condition variable. unsigned HasVar : 1; /// The location of the "while". SourceLocation WhileLoc; }; class DoStmtBitfields { friend class DoStmt; unsigned : NumStmtBits; /// The location of the "do". SourceLocation DoLoc; }; class ForStmtBitfields { friend class ForStmt; unsigned : NumStmtBits; /// The location of the "for". SourceLocation ForLoc; }; class GotoStmtBitfields { friend class GotoStmt; friend class IndirectGotoStmt; unsigned : NumStmtBits; /// The location of the "goto". SourceLocation GotoLoc; }; class ContinueStmtBitfields { friend class ContinueStmt; unsigned : NumStmtBits; /// The location of the "continue". SourceLocation ContinueLoc; }; class BreakStmtBitfields { friend class BreakStmt; unsigned : NumStmtBits; /// The location of the "break". SourceLocation BreakLoc; }; class ReturnStmtBitfields { friend class ReturnStmt; unsigned : NumStmtBits; /// True if this ReturnStmt has storage for an NRVO candidate. unsigned HasNRVOCandidate : 1; /// The location of the "return". SourceLocation RetLoc; }; class SwitchCaseBitfields { friend class SwitchCase; friend class CaseStmt; unsigned : NumStmtBits; /// Used by CaseStmt to store whether it is a case statement /// of the form case LHS ... RHS (a GNU extension). unsigned CaseStmtIsGNURange : 1; /// The location of the "case" or "default" keyword. 
SourceLocation KeywordLoc; }; //===--- Expression bitfields classes ---===// class ExprBitfields { friend class ASTStmtReader; // deserialization friend class AtomicExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class CallExpr; // ctor friend class CXXConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class CXXNewExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class DeclRefExpr; // computeDependence friend class DependentScopeDeclRefExpr; // ctor friend class DesignatedInitExpr; // ctor friend class Expr; friend class InitListExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ObjCMessageExpr; // ctor friend class OffsetOfExpr; // ctor friend class OpaqueValueExpr; // ctor friend class OverloadExpr; // ctor friend class ParenListExpr; // ctor friend class PseudoObjectExpr; // ctor friend class ShuffleVectorExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 3; unsigned TypeDependent : 1; unsigned ValueDependent : 1; unsigned InstantiationDependent : 1; unsigned ContainsUnexpandedParameterPack : 1; }; enum { NumExprBits = NumStmtBits + 9 }; class PredefinedExprBitfields { friend class ASTStmtReader; friend class PredefinedExpr; unsigned : NumExprBits; /// The kind of this PredefinedExpr. One of the enumeration values /// in PredefinedExpr::IdentKind. unsigned Kind : 4; /// True if this PredefinedExpr has a trailing "StringLiteral *" /// for the predefined identifier. unsigned HasFunctionName : 1; /// The location of this PredefinedExpr. SourceLocation Loc; }; class DeclRefExprBitfields { friend class ASTStmtReader; // deserialization friend class DeclRefExpr; unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; /// The location of the declaration name itself. SourceLocation Loc; }; enum APFloatSemantics { IEEEhalf, IEEEsingle, IEEEdouble, x87DoubleExtended, IEEEquad, PPCDoubleDouble }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class StringLiteralBitfields { friend class ASTStmtReader; friend class StringLiteral; unsigned : NumExprBits; /// The kind of this string literal. /// One of the enumeration values of StringLiteral::StringKind. unsigned Kind : 3; /// The width of a single character in bytes. Only values of 1, 2, /// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps /// the target + string kind to the appropriate CharByteWidth. unsigned CharByteWidth : 3; unsigned IsPascal : 1; /// The number of concatenated token this string is made of. /// This is the number of trailing SourceLocation. unsigned NumConcatenated; }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 3; }; class UnaryOperatorBitfields { friend class UnaryOperator; unsigned : NumExprBits; unsigned Opc : 5; unsigned CanOverflow : 1; SourceLocation Loc; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 3; unsigned IsType : 1; // true if operand is a type, false if an expression. 
}; class ArraySubscriptExprBitfields { friend class ArraySubscriptExpr; unsigned : NumExprBits; SourceLocation RBracketLoc; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; /// True if the callee of the call expression was found using ADL. unsigned UsesADL : 1; /// Padding used to align OffsetToTrailingObjects to a byte multiple. unsigned : 24 - 2 - NumExprBits; /// The offset in bytes from the this pointer to the start of the /// trailing objects belonging to CallExpr. Intentionally byte sized /// for faster access. unsigned OffsetToTrailingObjects : 8; }; enum { NumCallExprBits = 32 }; class MemberExprBitfields { friend class MemberExpr; unsigned : NumExprBits; /// IsArrow - True if this is "X->F", false if this is "X.F". unsigned IsArrow : 1; /// True if this member expression used a nested-name-specifier to /// refer to the member, e.g., "x->Base::f", or found its member via /// a using declaration. When true, a MemberExprNameQualifier /// structure is allocated immediately after the MemberExpr. unsigned HasQualifierOrFoundDecl : 1; /// True if this member expression specified a template keyword /// and/or a template argument list explicitly, e.g., x->f<int>, /// x->template f, x->template f<int>. /// When true, an ASTTemplateKWAndArgsInfo structure and its /// TemplateArguments (if any) are present. unsigned HasTemplateKWAndArgsInfo : 1; /// True if this member expression refers to a method that /// was resolved from an overloaded set having size greater than 1. unsigned HadMultipleCandidates : 1; /// This is the location of the -> or . in the expression. SourceLocation OperatorLoc; }; class CastExprBitfields { friend class CastExpr; friend class ImplicitCastExpr; unsigned : NumExprBits; unsigned Kind : 6; unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr. /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough /// here. ([implimits] Direct and indirect base classes [16384]). unsigned BasePathSize; }; class BinaryOperatorBitfields { friend class BinaryOperator; unsigned : NumExprBits; unsigned Opc : 6; /// This is only meaningful for operations on floating point /// types and 0 otherwise. unsigned FPFeatures : 3; SourceLocation OpLoc; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; }; class ParenListExprBitfields { friend class ASTStmtReader; friend class ParenListExpr; unsigned : NumExprBits; /// The number of expressions in the paren list. unsigned NumExprs; }; class GenericSelectionExprBitfields { friend class ASTStmtReader; friend class GenericSelectionExpr; unsigned : NumExprBits; /// The location of the "_Generic". SourceLocation GenericLoc; }; class PseudoObjectExprBitfields { friend class ASTStmtReader; // deserialization friend class PseudoObjectExpr; unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; //===--- C++ Expression bitfields classes ---===// class CXXOperatorCallExprBitfields { friend class ASTStmtReader; friend class CXXOperatorCallExpr; unsigned : NumCallExprBits; /// The kind of this overloaded operator. One of the enumerator /// value of OverloadedOperatorKind. 
unsigned OperatorKind : 6; // Only meaningful for floating point types. unsigned FPFeatures : 3; }; class CXXBoolLiteralExprBitfields { friend class CXXBoolLiteralExpr; unsigned : NumExprBits; /// The value of the boolean literal. unsigned Value : 1; /// The location of the boolean literal. SourceLocation Loc; }; class CXXNullPtrLiteralExprBitfields { friend class CXXNullPtrLiteralExpr; unsigned : NumExprBits; /// The location of the null pointer literal. SourceLocation Loc; }; class CXXThisExprBitfields { friend class CXXThisExpr; unsigned : NumExprBits; /// Whether this is an implicit "this". unsigned IsImplicit : 1; /// The location of the "this". SourceLocation Loc; }; class CXXThrowExprBitfields { friend class ASTStmtReader; friend class CXXThrowExpr; unsigned : NumExprBits; /// Whether the thrown variable (if any) is in scope. unsigned IsThrownVariableInScope : 1; /// The location of the "throw". SourceLocation ThrowLoc; }; class CXXDefaultArgExprBitfields { friend class ASTStmtReader; friend class CXXDefaultArgExpr; unsigned : NumExprBits; /// The location where the default argument expression was used. SourceLocation Loc; }; class CXXDefaultInitExprBitfields { friend class ASTStmtReader; friend class CXXDefaultInitExpr; unsigned : NumExprBits; /// The location where the default initializer expression was used. SourceLocation Loc; }; class CXXScalarValueInitExprBitfields { friend class ASTStmtReader; friend class CXXScalarValueInitExpr; unsigned : NumExprBits; SourceLocation RParenLoc; }; class CXXNewExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class CXXNewExpr; unsigned : NumExprBits; /// Was the usage ::new, i.e. is the global new to be used? unsigned IsGlobalNew : 1; /// Do we allocate an array? If so, the first trailing "Stmt *" is the /// size expression. unsigned IsArray : 1; /// Should the alignment be passed to the allocation function? unsigned ShouldPassAlignment : 1; /// If this is an array allocation, does the usual deallocation /// function for the allocated type want to know the allocated size? unsigned UsualArrayDeleteWantsSize : 1; /// What kind of initializer do we have? Could be none, parens, or braces. /// In storage, we distinguish between "none, and no initializer expr", and /// "none, but an implicit initializer expr". unsigned StoredInitializationStyle : 2; /// True if the allocated type was expressed as a parenthesized type-id. unsigned IsParenTypeId : 1; /// The number of placement new arguments. unsigned NumPlacementArgs; }; class CXXDeleteExprBitfields { friend class ASTStmtReader; friend class CXXDeleteExpr; unsigned : NumExprBits; /// Is this a forced global delete, i.e. "::delete"? unsigned GlobalDelete : 1; /// Is this the array form of delete, i.e. "delete[]"? unsigned ArrayForm : 1; /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is /// applied to pointer-to-array type (ArrayFormAsWritten will be false /// while ArrayForm will be true). unsigned ArrayFormAsWritten : 1; /// Does the usual deallocation function for the element type require /// a size_t argument? unsigned UsualArrayDeleteWantsSize : 1; /// Location of the expression. SourceLocation Loc; }; class TypeTraitExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class TypeTraitExpr; unsigned : NumExprBits; /// The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. 
unsigned Value : 1; /// The number of arguments to this type trait. unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; class DependentScopeDeclRefExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class DependentScopeDeclRefExpr; unsigned : NumExprBits; /// Whether the name includes info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; }; class CXXConstructExprBitfields { friend class ASTStmtReader; friend class CXXConstructExpr; unsigned : NumExprBits; unsigned Elidable : 1; unsigned HadMultipleCandidates : 1; unsigned ListInitialization : 1; unsigned StdInitListInitialization : 1; unsigned ZeroInitialization : 1; unsigned ConstructionKind : 3; SourceLocation Loc; }; class ExprWithCleanupsBitfields { friend class ASTStmtReader; // deserialization friend class ExprWithCleanups; unsigned : NumExprBits; // When false, it must not have side effects. unsigned CleanupsHaveSideEffects : 1; unsigned NumObjects : 32 - 1 - NumExprBits; }; class CXXUnresolvedConstructExprBitfields { friend class ASTStmtReader; friend class CXXUnresolvedConstructExpr; unsigned : NumExprBits; /// The number of arguments used to construct the type. unsigned NumArgs; }; class CXXDependentScopeMemberExprBitfields { friend class ASTStmtReader; friend class CXXDependentScopeMemberExpr; unsigned : NumExprBits; /// Whether this member expression used the '->' operator or /// the '.' operator. unsigned IsArrow : 1; /// Whether this member expression has info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; /// See getFirstQualifierFoundInScope() and the comment listing /// the trailing objects. unsigned HasFirstQualifierFoundInScope : 1; /// The location of the '->' or '.' operator. SourceLocation OperatorLoc; }; class OverloadExprBitfields { friend class ASTStmtReader; friend class OverloadExpr; unsigned : NumExprBits; /// Whether the name includes info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; /// Padding used by the derived classes to store various bits. If you /// need to add some data here, shrink this padding and add your data /// above. NumOverloadExprBits also needs to be updated. unsigned : 32 - NumExprBits - 1; /// The number of results. unsigned NumResults; }; enum { NumOverloadExprBits = NumExprBits + 1 }; class UnresolvedLookupExprBitfields { friend class ASTStmtReader; friend class UnresolvedLookupExpr; unsigned : NumOverloadExprBits; /// True if these lookup results should be extended by /// argument-dependent lookup if this is the operand of a function call. unsigned RequiresADL : 1; /// True if these lookup results are overloaded. This is pretty trivially /// rederivable if we urgently need to kill this field. unsigned Overloaded : 1; }; static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4, "UnresolvedLookupExprBitfields must be <= than 4 bytes to" "avoid trashing OverloadExprBitfields::NumResults!"); class UnresolvedMemberExprBitfields { friend class ASTStmtReader; friend class UnresolvedMemberExpr; unsigned : NumOverloadExprBits; /// Whether this member expression used the '->' operator or /// the '.' operator. unsigned IsArrow : 1; /// Whether the lookup results contain an unresolved using declaration. 
unsigned HasUnresolvedUsing : 1; }; static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4, "UnresolvedMemberExprBitfields must be <= than 4 bytes to" "avoid trashing OverloadExprBitfields::NumResults!"); class CXXNoexceptExprBitfields { friend class ASTStmtReader; friend class CXXNoexceptExpr; unsigned : NumExprBits; unsigned Value : 1; }; class SubstNonTypeTemplateParmExprBitfields { friend class ASTStmtReader; friend class SubstNonTypeTemplateParmExpr; unsigned : NumExprBits; /// The location of the non-type template parameter reference. SourceLocation NameLoc; }; //===--- C++ Coroutines TS bitfields classes ---===// class CoawaitExprBitfields { friend class CoawaitExpr; unsigned : NumExprBits; unsigned IsImplicit : 1; }; //===--- Obj-C Expression bitfields classes ---===// class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; //===--- Clang Extensions bitfields classes ---===// class OpaqueValueExprBitfields { friend class ASTStmtReader; friend class OpaqueValueExpr; unsigned : NumExprBits; /// The OVE is a unique semantic reference to its source expression if this /// bit is set to true. unsigned IsUnique : 1; SourceLocation Loc; }; union { // Same order as in StmtNodes.td. // Statements StmtBitfields StmtBits; NullStmtBitfields NullStmtBits; CompoundStmtBitfields CompoundStmtBits; LabelStmtBitfields LabelStmtBits; AttributedStmtBitfields AttributedStmtBits; IfStmtBitfields IfStmtBits; SwitchStmtBitfields SwitchStmtBits; WhileStmtBitfields WhileStmtBits; DoStmtBitfields DoStmtBits; ForStmtBitfields ForStmtBits; GotoStmtBitfields GotoStmtBits; ContinueStmtBitfields ContinueStmtBits; BreakStmtBitfields BreakStmtBits; ReturnStmtBitfields ReturnStmtBits; SwitchCaseBitfields SwitchCaseBits; // Expressions ExprBitfields ExprBits; PredefinedExprBitfields PredefinedExprBits; DeclRefExprBitfields DeclRefExprBits; FloatingLiteralBitfields FloatingLiteralBits; StringLiteralBitfields StringLiteralBits; CharacterLiteralBitfields CharacterLiteralBits; UnaryOperatorBitfields UnaryOperatorBits; UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits; ArraySubscriptExprBitfields ArraySubscriptExprBits; CallExprBitfields CallExprBits; MemberExprBitfields MemberExprBits; CastExprBitfields CastExprBits; BinaryOperatorBitfields BinaryOperatorBits; InitListExprBitfields InitListExprBits; ParenListExprBitfields ParenListExprBits; GenericSelectionExprBitfields GenericSelectionExprBits; PseudoObjectExprBitfields PseudoObjectExprBits; // C++ Expressions CXXOperatorCallExprBitfields CXXOperatorCallExprBits; CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits; CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits; CXXThisExprBitfields CXXThisExprBits; CXXThrowExprBitfields CXXThrowExprBits; CXXDefaultArgExprBitfields CXXDefaultArgExprBits; CXXDefaultInitExprBitfields CXXDefaultInitExprBits; CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits; CXXNewExprBitfields CXXNewExprBits; CXXDeleteExprBitfields CXXDeleteExprBits; TypeTraitExprBitfields TypeTraitExprBits; DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits; CXXConstructExprBitfields CXXConstructExprBits; ExprWithCleanupsBitfields ExprWithCleanupsBits; CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits; CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits; OverloadExprBitfields OverloadExprBits; UnresolvedLookupExprBitfields UnresolvedLookupExprBits; UnresolvedMemberExprBitfields UnresolvedMemberExprBits; 
CXXNoexceptExprBitfields CXXNoexceptExprBits; SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits; // C++ Coroutines TS expressions CoawaitExprBitfields CoawaitBits; // Obj-C Expressions ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits; // Clang Extensions OpaqueValueExprBitfields OpaqueValueExprBits; }; public: // Only allow allocation of Stmts using the allocator in ASTContext // or by doing a placement new. void* operator new(size_t bytes, const ASTContext& C, unsigned alignment = 8); void* operator new(size_t bytes, const ASTContext* C, unsigned alignment = 8) { return operator new(bytes, *C, alignment); } void *operator new(size_t bytes, void *mem) noexcept { return mem; } void operator delete(void *, const ASTContext &, unsigned) noexcept {} void operator delete(void *, const ASTContext *, unsigned) noexcept {} void operator delete(void *, size_t) noexcept {} void operator delete(void *, void *) noexcept {} public: /// A placeholder type used to construct an empty shell of a /// type, that will be filled in later (e.g., by some /// de-serialization). struct EmptyShell {}; protected: /// Iterator for iterating over Stmt * arrays that contain only T *. /// /// This is needed because AST nodes use Stmt* arrays to store /// references to children (to be compatible with StmtIterator). template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *> struct CastIterator : llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *, std::random_access_iterator_tag, TPtr> { using Base = typename CastIterator::iterator_adaptor_base; CastIterator() : Base(nullptr) {} CastIterator(StmtPtr *I) : Base(I) {} typename Base::value_type operator*() const { return cast<T>(*this->I); } }; /// Const iterator for iterating over Stmt * arrays that contain only T *. template <typename T> using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>; using ExprIterator = CastIterator<Expr>; using ConstExprIterator = ConstCastIterator<Expr>; private: /// Whether statistic collection is enabled. static bool StatisticsEnabled; protected: /// Construct an empty statement. explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {} public: Stmt(StmtClass SC) { static_assert(sizeof(*this) <= 8, "changing bitfields changed sizeof(Stmt)"); static_assert(sizeof(*this) % alignof(void *) == 0, "Insufficient alignment!"); StmtBits.sClass = SC; if (StatisticsEnabled) Stmt::addStmtClass(SC); } StmtClass getStmtClass() const { return static_cast<StmtClass>(StmtBits.sClass); } const char *getStmtClassName() const; /// SourceLocation tokens are not useful in isolation - they are low level /// value objects created/interpreted by SourceManager. We assume AST /// clients will have a pointer to the respective SourceManager. SourceRange getSourceRange() const LLVM_READONLY; SourceLocation getBeginLoc() const LLVM_READONLY; SourceLocation getEndLoc() const LLVM_READONLY; // global temp stats (until we have a per-module visitor) static void addStmtClass(const StmtClass s); static void EnableStatistics(); static void PrintStats(); /// Dumps the specified AST fragment and all subtrees to /// \c llvm::errs(). void dump() const; void dump(SourceManager &SM) const; void dump(raw_ostream &OS, SourceManager &SM) const; void dump(raw_ostream &OS) const; /// \return Unique reproducible object identifier int64_t getID(const ASTContext &Context) const; /// dumpColor - same as dump(), but forces color highlighting. 
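  // Usage sketch for the allocation and dumping entry points declared in this
  // class (hypothetical `ASTContext &Ctx` and `SourceLocation Loc` supplied by
  // the caller; `NullStmt` is declared later in this file):
  //
  //   Stmt *S = new (Ctx) NullStmt(Loc); // routed through the ASTContext
  //                                      // operator new declared above
  //   llvm::errs() << S->getStmtClassName() << "\n";
  //   S->dump();                         // print the whole subtree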
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy, unsigned Indentation = 0,
                   StringRef NewlineSymbol = "\n",
                   const ASTContext *Context = nullptr) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);
  const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
    return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
  }

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
        const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpressions of an
  /// AST node. This permits easy iteration over all nodes in the AST.
  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;

  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();

  const_child_range children() const {
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  ///   the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  ///   representation of this statement (e.g., where non-type template
  ///   parameters are identified by index/level rather than their
  ///   declaration pointers) or the exact representation of the statement as
  ///   written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
  ///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  ///   have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
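  // Usage sketch (hypothetical `ASTContext &Ctx`, a `DeclGroupRef DG` built by
  // the caller, and start/end locations `Start`/`End`): a DeclStmt simply
  // wraps the group so declarations can appear where statements are expected.
  //
  //   DeclStmt *DS = new (Ctx) DeclStmt(DG, Start, End);
  //   for (Decl *D : DS->decls()) // decls() range is declared below
  //     (void)D;                  // inspect each declaration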
explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {} /// isSingleDecl - This method returns true if this DeclStmt refers /// to a single Decl. bool isSingleDecl() const { return DG.isSingleDecl(); } const Decl *getSingleDecl() const { return DG.getSingleDecl(); } Decl *getSingleDecl() { return DG.getSingleDecl(); } const DeclGroupRef getDeclGroup() const { return DG; } DeclGroupRef getDeclGroup() { return DG; } void setDeclGroup(DeclGroupRef DGR) { DG = DGR; } void setStartLoc(SourceLocation L) { StartLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DeclStmtClass; } // Iterators over subexpressions. child_range children() { return child_range(child_iterator(DG.begin(), DG.end()), child_iterator(DG.end(), DG.end())); } using decl_iterator = DeclGroupRef::iterator; using const_decl_iterator = DeclGroupRef::const_iterator; using decl_range = llvm::iterator_range<decl_iterator>; using decl_const_range = llvm::iterator_range<const_decl_iterator>; decl_range decls() { return decl_range(decl_begin(), decl_end()); } decl_const_range decls() const { return decl_const_range(decl_begin(), decl_end()); } decl_iterator decl_begin() { return DG.begin(); } decl_iterator decl_end() { return DG.end(); } const_decl_iterator decl_begin() const { return DG.begin(); } const_decl_iterator decl_end() const { return DG.end(); } using reverse_decl_iterator = std::reverse_iterator<decl_iterator>; reverse_decl_iterator decl_rbegin() { return reverse_decl_iterator(decl_end()); } reverse_decl_iterator decl_rend() { return reverse_decl_iterator(decl_begin()); } }; /// NullStmt - This is the null statement ";": C99 6.8.3p3. /// class NullStmt : public Stmt { public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass) { NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro; setSemiLoc(L); } /// Build an empty null statement. explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {} SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; } void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; } bool hasLeadingEmptyMacro() const { return NullStmtBits.HasLeadingEmptyMacro; } SourceLocation getBeginLoc() const { return getSemiLoc(); } SourceLocation getEndLoc() const { return getSemiLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == NullStmtClass; } child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// CompoundStmt - This represents a group of statements like { stmt stmt }. class CompoundStmt final : public Stmt, private llvm::TrailingObjects<CompoundStmt, Stmt *> { friend class ASTStmtReader; friend TrailingObjects; /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits. SourceLocation RBraceLoc; CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB); explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {} void setStmts(ArrayRef<Stmt *> Stmts); public: static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB); // Build an empty compound statement with a location. explicit CompoundStmt(SourceLocation Loc) : Stmt(CompoundStmtClass), RBraceLoc(Loc) { CompoundStmtBits.NumStmts = 0; CompoundStmtBits.LBraceLoc = Loc; } // Build an empty compound statement. 
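  // Usage sketch (hypothetical `ASTContext &Ctx`, existing statements
  // `S1`/`S2`, and brace locations `LB`/`RB`):
  //
  //   Stmt *Stmts[] = {S1, S2};
  //   CompoundStmt *CS = CompoundStmt::Create(Ctx, Stmts, LB, RB);
  //   for (Stmt *Child : CS->body()) // body() is declared below
  //     Child->dump();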
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts); bool body_empty() const { return CompoundStmtBits.NumStmts == 0; } unsigned size() const { return CompoundStmtBits.NumStmts; } using body_iterator = Stmt **; using body_range = llvm::iterator_range<body_iterator>; body_range body() { return body_range(body_begin(), body_end()); } body_iterator body_begin() { return getTrailingObjects<Stmt *>(); } body_iterator body_end() { return body_begin() + size(); } Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; } Stmt *body_back() { return !body_empty() ? body_begin()[size() - 1] : nullptr; } void setLastStmt(Stmt *S) { assert(!body_empty() && "setLastStmt"); body_begin()[size() - 1] = S; } using const_body_iterator = Stmt *const *; using body_const_range = llvm::iterator_range<const_body_iterator>; body_const_range body() const { return body_const_range(body_begin(), body_end()); } const_body_iterator body_begin() const { return getTrailingObjects<Stmt *>(); } const_body_iterator body_end() const { return body_begin() + size(); } const Stmt *body_front() const { return !body_empty() ? body_begin()[0] : nullptr; } const Stmt *body_back() const { return !body_empty() ? body_begin()[size() - 1] : nullptr; } using reverse_body_iterator = std::reverse_iterator<body_iterator>; reverse_body_iterator body_rbegin() { return reverse_body_iterator(body_end()); } reverse_body_iterator body_rend() { return reverse_body_iterator(body_begin()); } using const_reverse_body_iterator = std::reverse_iterator<const_body_iterator>; const_reverse_body_iterator body_rbegin() const { return const_reverse_body_iterator(body_end()); } const_reverse_body_iterator body_rend() const { return const_reverse_body_iterator(body_begin()); } SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; } SourceLocation getEndLoc() const { return RBraceLoc; } SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; } SourceLocation getRBracLoc() const { return RBraceLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == CompoundStmtClass; } // Iterators child_range children() { return child_range(body_begin(), body_end()); } const_child_range children() const { return const_child_range(body_begin(), body_end()); } }; // SwitchCase is the base class for CaseStmt and DefaultStmt, class SwitchCase : public Stmt { protected: /// The location of the ":". SourceLocation ColonLoc; // The location of the "case" or "default" keyword. Stored in SwitchCaseBits. // SourceLocation KeywordLoc; /// A pointer to the following CaseStmt or DefaultStmt class, /// used by SwitchStmt. 
SwitchCase *NextSwitchCase = nullptr; SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc) : Stmt(SC), ColonLoc(ColonLoc) { setKeywordLoc(KWLoc); } SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {} public: const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; } SwitchCase *getNextSwitchCase() { return NextSwitchCase; } void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; } SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; } void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; } SourceLocation getColonLoc() const { return ColonLoc; } void setColonLoc(SourceLocation L) { ColonLoc = L; } inline Stmt *getSubStmt(); const Stmt *getSubStmt() const { return const_cast<SwitchCase *>(this)->getSubStmt(); } SourceLocation getBeginLoc() const { return getKeywordLoc(); } inline SourceLocation getEndLoc() const LLVM_READONLY; static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass || T->getStmtClass() == DefaultStmtClass; } }; /// CaseStmt - Represent a case statement. It can optionally be a GNU case /// statement of the form LHS ... RHS representing a range of cases. class CaseStmt final : public SwitchCase, private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> { friend TrailingObjects; // CaseStmt is followed by several trailing objects, some of which optional. // Note that it would be more convenient to put the optional trailing objects // at the end but this would impact children(). // The trailing objects are in order: // // * A "Stmt *" for the LHS of the case statement. Always present. // // * A "Stmt *" for the RHS of the case statement. This is a GNU extension // which allow ranges in cases statement of the form LHS ... RHS. // Present if and only if caseStmtIsGNURange() is true. // // * A "Stmt *" for the substatement of the case statement. Always present. // // * A SourceLocation for the location of the ... if this is a case statement // with a range. Present if and only if caseStmtIsGNURange() is true. enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 }; enum { NumMandatoryStmtPtr = 2 }; unsigned numTrailingObjects(OverloadToken<Stmt *>) const { return NumMandatoryStmtPtr + caseStmtIsGNURange(); } unsigned numTrailingObjects(OverloadToken<SourceLocation>) const { return caseStmtIsGNURange(); } unsigned lhsOffset() const { return LhsOffset; } unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); } unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; } /// Build a case statement assuming that the storage for the /// trailing objects has been properly allocated. CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc, SourceLocation ellipsisLoc, SourceLocation colonLoc) : SwitchCase(CaseStmtClass, caseLoc, colonLoc) { // Handle GNU case statements of the form LHS ... RHS. bool IsGNURange = rhs != nullptr; SwitchCaseBits.CaseStmtIsGNURange = IsGNURange; setLHS(lhs); setSubStmt(nullptr); if (IsGNURange) { setRHS(rhs); setEllipsisLoc(ellipsisLoc); } } /// Build an empty switch case statement. explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange) : SwitchCase(CaseStmtClass, Empty) { SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange; } public: /// Build a case statement. static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs, SourceLocation caseLoc, SourceLocation ellipsisLoc, SourceLocation colonLoc); /// Build an empty case statement. 
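  // Usage sketch for the GNU range form `case 1 ... 5:` (hypothetical
  // `ASTContext &Ctx`, constant expressions `One`/`Five`, a substatement
  // `Body`, and locations `CaseLoc`/`EllipsisLoc`/`ColonLoc`):
  //
  //   CaseStmt *CS =
  //       CaseStmt::Create(Ctx, One, Five, CaseLoc, EllipsisLoc, ColonLoc);
  //   CS->setSubStmt(Body);
  //   // CS->caseStmtIsGNURange() is true and CS->getRHS() returns Five.
  //
  // Passing a null `rhs` instead yields the plain `case 1:` form, with no
  // trailing RHS pointer or ellipsis location allocated.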
static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange); /// True if this case statement is of the form case LHS ... RHS, which /// is a GNU extension. In this case the RHS can be obtained with getRHS() /// and the location of the ellipsis can be obtained with getEllipsisLoc(). bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; } SourceLocation getCaseLoc() const { return getKeywordLoc(); } void setCaseLoc(SourceLocation L) { setKeywordLoc(L); } /// Get the location of the ... in a case statement of the form LHS ... RHS. SourceLocation getEllipsisLoc() const { return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>() : SourceLocation(); } /// Set the location of the ... in a case statement of the form LHS ... RHS. /// Assert that this case statement is of this form. void setEllipsisLoc(SourceLocation L) { assert( caseStmtIsGNURange() && "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!"); *getTrailingObjects<SourceLocation>() = L; } Expr *getLHS() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]); } const Expr *getLHS() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]); } void setLHS(Expr *Val) { getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val); } Expr *getRHS() { return caseStmtIsGNURange() ? reinterpret_cast<Expr *>( getTrailingObjects<Stmt *>()[rhsOffset()]) : nullptr; } const Expr *getRHS() const { return caseStmtIsGNURange() ? reinterpret_cast<Expr *>( getTrailingObjects<Stmt *>()[rhsOffset()]) : nullptr; } void setRHS(Expr *Val) { assert(caseStmtIsGNURange() && "setRHS but this is not a case stmt of the form LHS ... RHS!"); getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val); } Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; } const Stmt *getSubStmt() const { return getTrailingObjects<Stmt *>()[subStmtOffset()]; } void setSubStmt(Stmt *S) { getTrailingObjects<Stmt *>()[subStmtOffset()] = S; } SourceLocation getBeginLoc() const { return getKeywordLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { // Handle deeply nested case statements with iteration instead of recursion. const CaseStmt *CS = this; while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt())) CS = CS2; return CS->getSubStmt()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CaseStmtClass; } // Iterators child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } }; class DefaultStmt : public SwitchCase { Stmt *SubStmt; public: DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {} /// Build an empty default statement. 
explicit DefaultStmt(EmptyShell Empty) : SwitchCase(DefaultStmtClass, Empty) {} Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *S) { SubStmt = S; } SourceLocation getDefaultLoc() const { return getKeywordLoc(); } void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); } SourceLocation getBeginLoc() const { return getKeywordLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == DefaultStmtClass; } // Iterators child_range children() { return child_range(&SubStmt, &SubStmt + 1); } }; SourceLocation SwitchCase::getEndLoc() const { if (const auto *CS = dyn_cast<CaseStmt>(this)) return CS->getEndLoc(); else if (const auto *DS = dyn_cast<DefaultStmt>(this)) return DS->getEndLoc(); llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!"); } Stmt *SwitchCase::getSubStmt() { if (auto *CS = dyn_cast<CaseStmt>(this)) return CS->getSubStmt(); else if (auto *DS = dyn_cast<DefaultStmt>(this)) return DS->getSubStmt(); llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!"); } /// Represents a statement that could possibly have a value and type. This /// covers expression-statements, as well as labels and attributed statements. /// /// Value statements have a special meaning when they are the last non-null /// statement in a GNU statement expression, where they determine the value /// of the statement expression. class ValueStmt : public Stmt { protected: using Stmt::Stmt; public: const Expr *getExprStmt() const; Expr *getExprStmt() { const ValueStmt *ConstThis = this; return const_cast<Expr*>(ConstThis->getExprStmt()); } static bool classof(const Stmt *T) { return T->getStmtClass() >= firstValueStmtConstant && T->getStmtClass() <= lastValueStmtConstant; } }; /// LabelStmt - Represents a label, which has a substatement. For example: /// foo: return; class LabelStmt : public ValueStmt { LabelDecl *TheDecl; Stmt *SubStmt; public: /// Build a label statement. LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt) : ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) { setIdentLoc(IL); } /// Build an empty label statement. explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {} SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; } void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; } LabelDecl *getDecl() const { return TheDecl; } void setDecl(LabelDecl *D) { TheDecl = D; } const char *getName() const; Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } void setSubStmt(Stmt *SS) { SubStmt = SS; } SourceLocation getBeginLoc() const { return getIdentLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == LabelStmtClass; } }; /// Represents an attribute applied to a statement. /// /// Represents an attribute applied to a statement. For example: /// [[omp::for(...)]] for (...) { ... 
} class AttributedStmt final : public ValueStmt, private llvm::TrailingObjects<AttributedStmt, const Attr *> { friend class ASTStmtReader; friend TrailingObjects; Stmt *SubStmt; AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs, Stmt *SubStmt) : ValueStmt(AttributedStmtClass), SubStmt(SubStmt) { AttributedStmtBits.NumAttrs = Attrs.size(); AttributedStmtBits.AttrLoc = Loc; std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr()); } explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs) : ValueStmt(AttributedStmtClass, Empty) { AttributedStmtBits.NumAttrs = NumAttrs; AttributedStmtBits.AttrLoc = SourceLocation{}; std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr); } const Attr *const *getAttrArrayPtr() const { return getTrailingObjects<const Attr *>(); } const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); } public: static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc, ArrayRef<const Attr *> Attrs, Stmt *SubStmt); // Build an empty attributed statement. static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs); SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; } ArrayRef<const Attr *> getAttrs() const { return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs); } Stmt *getSubStmt() { return SubStmt; } const Stmt *getSubStmt() const { return SubStmt; } SourceLocation getBeginLoc() const { return getAttrLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();} child_range children() { return child_range(&SubStmt, &SubStmt + 1); } static bool classof(const Stmt *T) { return T->getStmtClass() == AttributedStmtClass; } }; /// IfStmt - This represents an if/then/else. class IfStmt final : public Stmt, private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> { friend TrailingObjects; // IfStmt is followed by several trailing objects, some of which optional. // Note that it would be more convenient to put the optional trailing // objects at then end but this would change the order of the children. // The trailing objects are in order: // // * A "Stmt *" for the init statement. // Present if and only if hasInitStorage(). // // * A "Stmt *" for the condition variable. // Present if and only if hasVarStorage(). This is in fact a "DeclStmt *". // // * A "Stmt *" for the condition. // Always present. This is in fact a "Expr *". // // * A "Stmt *" for the then statement. // Always present. // // * A "Stmt *" for the else statement. // Present if and only if hasElseStorage(). // // * A "SourceLocation" for the location of the "else". // Present if and only if hasElseStorage(). enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 }; enum { NumMandatoryStmtPtr = 2 }; unsigned numTrailingObjects(OverloadToken<Stmt *>) const { return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() + hasInitStorage(); } unsigned numTrailingObjects(OverloadToken<SourceLocation>) const { return hasElseStorage(); } unsigned initOffset() const { return InitOffset; } unsigned varOffset() const { return InitOffset + hasInitStorage(); } unsigned condOffset() const { return InitOffset + hasInitStorage() + hasVarStorage(); } unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; } unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; } /// Build an if/then/else statement. 
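  // Worked layout example for the trailing objects described above, for a
  // statement like `if (int x = foo()) then(); else other();` (no init
  // statement, a condition variable, and an else branch):
  //
  //   Stmt *[0]          DeclStmt for `int x = foo()`  (varOffset()  == 0)
  //   Stmt *[1]          condition expression          (condOffset() == 1)
  //   Stmt *[2]          then branch                   (thenOffset() == 2)
  //   Stmt *[3]          else branch                   (elseOffset() == 3)
  //   SourceLocation[0]  location of the `else`
  //
  // With hasInitStorage() and hasVarStorage() both false, condOffset()
  // collapses to 0 and only the condition, the then branch and (if present)
  // the else branch are allocated.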
IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else); /// Build an empty if/then/else statement. explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit); public: /// Create an IfStmt. static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL = SourceLocation(), Stmt *Else = nullptr); /// Create an empty IfStmt optionally with storage for an else statement, /// condition variable and init expression. static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar, bool HasInit); /// True if this IfStmt has the storage for an init statement. bool hasInitStorage() const { return IfStmtBits.HasInit; } /// True if this IfStmt has storage for a variable declaration. bool hasVarStorage() const { return IfStmtBits.HasVar; } /// True if this IfStmt has storage for an else statement. bool hasElseStorage() const { return IfStmtBits.HasElse; } Expr *getCond() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } void setCond(Expr *Cond) { getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond); } Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; } const Stmt *getThen() const { return getTrailingObjects<Stmt *>()[thenOffset()]; } void setThen(Stmt *Then) { getTrailingObjects<Stmt *>()[thenOffset()] = Then; } Stmt *getElse() { return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()] : nullptr; } const Stmt *getElse() const { return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()] : nullptr; } void setElse(Stmt *Else) { assert(hasElseStorage() && "This if statement has no storage for an else statement!"); getTrailingObjects<Stmt *>()[elseOffset()] = Else; } /// Retrieve the variable declared in this "if" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// if (int x = foo()) { /// printf("x is %d", x); /// } /// \endcode VarDecl *getConditionVariable(); const VarDecl *getConditionVariable() const { return const_cast<IfStmt *>(this)->getConditionVariable(); } /// Set the condition variable for this if statement. /// The if statement must have storage for the condition variable. void setConditionVariable(const ASTContext &Ctx, VarDecl *V); /// If this IfStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. DeclStmt *getConditionVariableDeclStmt() { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } Stmt *getInit() { return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } const Stmt *getInit() const { return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } void setInit(Stmt *Init) { assert(hasInitStorage() && "This if statement has no storage for an init statement!"); getTrailingObjects<Stmt *>()[initOffset()] = Init; } SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; } void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; } SourceLocation getElseLoc() const { return hasElseStorage() ? 
*getTrailingObjects<SourceLocation>() : SourceLocation(); } void setElseLoc(SourceLocation ElseLoc) { assert(hasElseStorage() && "This if statement has no storage for an else statement!"); *getTrailingObjects<SourceLocation>() = ElseLoc; } bool isConstexpr() const { return IfStmtBits.IsConstexpr; } void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; } bool isObjCAvailabilityCheck() const; SourceLocation getBeginLoc() const { return getIfLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { if (getElse()) return getElse()->getEndLoc(); return getThen()->getEndLoc(); } // Iterators over subexpressions. The iterators will include iterating // over the initialization expression referenced by the condition variable. child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } static bool classof(const Stmt *T) { return T->getStmtClass() == IfStmtClass; } }; /// SwitchStmt - This represents a 'switch' stmt. class SwitchStmt final : public Stmt, private llvm::TrailingObjects<SwitchStmt, Stmt *> { friend TrailingObjects; /// Points to a linked list of case and default statements. SwitchCase *FirstCase; // SwitchStmt is followed by several trailing objects, // some of which optional. Note that it would be more convenient to // put the optional trailing objects at the end but this would change // the order in children(). // The trailing objects are in order: // // * A "Stmt *" for the init statement. // Present if and only if hasInitStorage(). // // * A "Stmt *" for the condition variable. // Present if and only if hasVarStorage(). This is in fact a "DeclStmt *". // // * A "Stmt *" for the condition. // Always present. This is in fact an "Expr *". // // * A "Stmt *" for the body. // Always present. enum { InitOffset = 0, BodyOffsetFromCond = 1 }; enum { NumMandatoryStmtPtr = 2 }; unsigned numTrailingObjects(OverloadToken<Stmt *>) const { return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage(); } unsigned initOffset() const { return InitOffset; } unsigned varOffset() const { return InitOffset + hasInitStorage(); } unsigned condOffset() const { return InitOffset + hasInitStorage() + hasVarStorage(); } unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; } /// Build a switch statement. SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond); /// Build a empty switch statement. explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar); public: /// Create a switch statement. static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond); /// Create an empty switch statement optionally with storage for /// an init expression and a condition variable. static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit, bool HasVar); /// True if this SwitchStmt has storage for an init statement. bool hasInitStorage() const { return SwitchStmtBits.HasInit; } /// True if this SwitchStmt has storage for a condition variable. 
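  // Usage sketch (hypothetical `ASTContext &Ctx`, a condition `Cond`, a body
  // `Body`, and previously created `Case1`/`DefaultCase`). addSwitchCase()
  // prepends to the list, so getSwitchCaseList() walks cases in reverse order
  // of addition:
  //
  //   SwitchStmt *SS =
  //       SwitchStmt::Create(Ctx, /*Init=*/nullptr, /*Var=*/nullptr, Cond);
  //   SS->setBody(Body);
  //   SS->addSwitchCase(Case1);
  //   SS->addSwitchCase(DefaultCase);
  //   for (SwitchCase *SC = SS->getSwitchCaseList(); SC;
  //        SC = SC->getNextSwitchCase())
  //     SC->dump();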
bool hasVarStorage() const { return SwitchStmtBits.HasVar; } Expr *getCond() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } void setCond(Expr *Cond) { getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; } const Stmt *getBody() const { return getTrailingObjects<Stmt *>()[bodyOffset()]; } void setBody(Stmt *Body) { getTrailingObjects<Stmt *>()[bodyOffset()] = Body; } Stmt *getInit() { return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } const Stmt *getInit() const { return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } void setInit(Stmt *Init) { assert(hasInitStorage() && "This switch statement has no storage for an init statement!"); getTrailingObjects<Stmt *>()[initOffset()] = Init; } /// Retrieve the variable declared in this "switch" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// switch (int x = foo()) { /// case 0: break; /// // ... /// } /// \endcode VarDecl *getConditionVariable(); const VarDecl *getConditionVariable() const { return const_cast<SwitchStmt *>(this)->getConditionVariable(); } /// Set the condition variable in this switch statement. /// The switch statement must have storage for it. void setConditionVariable(const ASTContext &Ctx, VarDecl *VD); /// If this SwitchStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. DeclStmt *getConditionVariableDeclStmt() { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } SwitchCase *getSwitchCaseList() { return FirstCase; } const SwitchCase *getSwitchCaseList() const { return FirstCase; } void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; } SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; } void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; } void setBody(Stmt *S, SourceLocation SL) { setBody(S); setSwitchLoc(SL); } void addSwitchCase(SwitchCase *SC) { assert(!SC->getNextSwitchCase() && "case/default already added to a switch"); SC->setNextSwitchCase(FirstCase); FirstCase = SC; } /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a /// switch over an enum value then all cases have been explicitly covered. void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; } /// Returns true if the SwitchStmt is a switch of an enum value and all cases /// have been explicitly covered. bool isAllEnumCasesCovered() const { return SwitchStmtBits.AllEnumCasesCovered; } SourceLocation getBeginLoc() const { return getSwitchLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getBody() ? getBody()->getEndLoc() : reinterpret_cast<const Stmt *>(getCond())->getEndLoc(); } // Iterators child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } static bool classof(const Stmt *T) { return T->getStmtClass() == SwitchStmtClass; } }; /// WhileStmt - This represents a 'while' stmt. 
class WhileStmt final : public Stmt, private llvm::TrailingObjects<WhileStmt, Stmt *> { friend TrailingObjects; // WhileStmt is followed by several trailing objects, // some of which optional. Note that it would be more // convenient to put the optional trailing object at the end // but this would affect children(). // The trailing objects are in order: // // * A "Stmt *" for the condition variable. // Present if and only if hasVarStorage(). This is in fact a "DeclStmt *". // // * A "Stmt *" for the condition. // Always present. This is in fact an "Expr *". // // * A "Stmt *" for the body. // Always present. // enum { VarOffset = 0, BodyOffsetFromCond = 1 }; enum { NumMandatoryStmtPtr = 2 }; unsigned varOffset() const { return VarOffset; } unsigned condOffset() const { return VarOffset + hasVarStorage(); } unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; } unsigned numTrailingObjects(OverloadToken<Stmt *>) const { return NumMandatoryStmtPtr + hasVarStorage(); } /// Build a while statement. WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body, SourceLocation WL); /// Build an empty while statement. explicit WhileStmt(EmptyShell Empty, bool HasVar); public: /// Create a while statement. static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body, SourceLocation WL); /// Create an empty while statement optionally with storage for /// a condition variable. static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar); /// True if this WhileStmt has storage for a condition variable. bool hasVarStorage() const { return WhileStmtBits.HasVar; } Expr *getCond() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } void setCond(Expr *Cond) { getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; } const Stmt *getBody() const { return getTrailingObjects<Stmt *>()[bodyOffset()]; } void setBody(Stmt *Body) { getTrailingObjects<Stmt *>()[bodyOffset()] = Body; } /// Retrieve the variable declared in this "while" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// while (int x = random()) { /// // ... /// } /// \endcode VarDecl *getConditionVariable(); const VarDecl *getConditionVariable() const { return const_cast<WhileStmt *>(this)->getConditionVariable(); } /// Set the condition variable of this while statement. /// The while statement must have storage for it. void setConditionVariable(const ASTContext &Ctx, VarDecl *V); /// If this WhileStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. DeclStmt *getConditionVariableDeclStmt() { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? 
static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; } void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; } SourceLocation getBeginLoc() const { return getWhileLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getBody()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == WhileStmtClass; } // Iterators child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } }; /// DoStmt - This represents a 'do/while' stmt. class DoStmt : public Stmt { enum { BODY, COND, END_EXPR }; Stmt *SubExprs[END_EXPR]; SourceLocation WhileLoc; SourceLocation RParenLoc; // Location of final ')' in do stmt condition. public: DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL, SourceLocation RP) : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) { setCond(Cond); setBody(Body); setDoLoc(DL); } /// Build an empty do-while statement. explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {} Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(SubExprs[COND]); } void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *Body) { SubExprs[BODY] = Body; } SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; } void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getBeginLoc() const { return getDoLoc(); } SourceLocation getEndLoc() const { return getRParenLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == DoStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } }; /// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of /// the init/cond/inc parts of the ForStmt will be null if they were not /// specified in the source. class ForStmt : public Stmt { enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt. SourceLocation LParenLoc, RParenLoc; public: ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP); /// Build an empty for statement. explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {} Stmt *getInit() { return SubExprs[INIT]; } /// Retrieve the variable declared in this "for" statement, if any. /// /// In the following example, "y" is the condition variable. /// \code /// for (int x = random(); int y = mangle(x); ++x) { /// // ... /// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. 
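  // Usage sketch: each of the init/cond/inc parts may be null, mirroring the
  // source, so `for (;;) body();` stores null for all three (hypothetical
  // `ASTContext &Ctx`, a body `Body`, and locations `ForLoc`/`LP`/`RP`):
  //
  //   ForStmt *FS = new (Ctx) ForStmt(Ctx, /*Init=*/nullptr, /*Cond=*/nullptr,
  //                                   /*condVar=*/nullptr, /*Inc=*/nullptr,
  //                                   Body, ForLoc, LP, RP);
  //   // FS->getInit(), FS->getCond() and FS->getInc() all return null here.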
const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForStmtBits.ForLoc; } void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getBeginLoc() const { return getForLoc(); } SourceLocation getEndLoc() const { return getBody()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// GotoStmt - This represents a direct goto. class GotoStmt : public Stmt { LabelDecl *Label; SourceLocation LabelLoc; public: GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL) : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) { setGotoLoc(GL); } /// Build an empty goto statement. explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {} LabelDecl *getLabel() const { return Label; } void setLabel(LabelDecl *D) { Label = D; } SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; } void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; } SourceLocation getLabelLoc() const { return LabelLoc; } void setLabelLoc(SourceLocation L) { LabelLoc = L; } SourceLocation getBeginLoc() const { return getGotoLoc(); } SourceLocation getEndLoc() const { return getLabelLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == GotoStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// IndirectGotoStmt - This represents an indirect goto. class IndirectGotoStmt : public Stmt { SourceLocation StarLoc; Stmt *Target; public: IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target) : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) { setTarget(target); setGotoLoc(gotoLoc); } /// Build an empty indirect goto statement. explicit IndirectGotoStmt(EmptyShell Empty) : Stmt(IndirectGotoStmtClass, Empty) {} void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; } SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; } void setStarLoc(SourceLocation L) { StarLoc = L; } SourceLocation getStarLoc() const { return StarLoc; } Expr *getTarget() { return reinterpret_cast<Expr *>(Target); } const Expr *getTarget() const { return reinterpret_cast<const Expr *>(Target); } void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); } /// getConstantTarget - Returns the fixed target of this indirect /// goto, if one exists. 
LabelDecl *getConstantTarget(); const LabelDecl *getConstantTarget() const { return const_cast<IndirectGotoStmt *>(this)->getConstantTarget(); } SourceLocation getBeginLoc() const { return getGotoLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == IndirectGotoStmtClass; } // Iterators child_range children() { return child_range(&Target, &Target + 1); } }; /// ContinueStmt - This represents a continue. class ContinueStmt : public Stmt { public: ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) { setContinueLoc(CL); } /// Build an empty continue statement. explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {} SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; } void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; } SourceLocation getBeginLoc() const { return getContinueLoc(); } SourceLocation getEndLoc() const { return getContinueLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ContinueStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// BreakStmt - This represents a break. class BreakStmt : public Stmt { public: BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) { setBreakLoc(BL); } /// Build an empty break statement. explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {} SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; } void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; } SourceLocation getBeginLoc() const { return getBreakLoc(); } SourceLocation getEndLoc() const { return getBreakLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == BreakStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// ReturnStmt - This represents a return, optionally of an expression: /// return; /// return 4; /// /// Note that GCC allows return with no argument in a function declared to /// return a value, and it allows returning a value in functions declared to /// return void. We explicitly model this in the AST, which means you can't /// depend on the return type of the function and the presence of an argument. class ReturnStmt final : public Stmt, private llvm::TrailingObjects<ReturnStmt, const VarDecl *> { friend TrailingObjects; /// The return expression. Stmt *RetExpr; // ReturnStmt is followed optionally by a trailing "const VarDecl *" // for the NRVO candidate. Present if and only if hasNRVOCandidate(). /// True if this ReturnStmt has storage for an NRVO candidate. bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; } unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const { return hasNRVOCandidate(); } /// Build a return statement. ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate); /// Build an empty return statement. explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate); public: /// Create a return statement. static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate); /// Create an empty return statement, optionally with /// storage for an NRVO candidate. 
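  // Usage sketch (hypothetical `ASTContext &Ctx`, return location `RetLoc`,
  // returned expression `RetExpr`, and local variable `NRVOVar`): the NRVO
  // candidate is only stored when one is passed, otherwise no trailing
  // "const VarDecl *" is allocated.
  //
  //   ReturnStmt *RS = ReturnStmt::Create(Ctx, RetLoc, RetExpr, NRVOVar);
  //   if (const VarDecl *VD = RS->getNRVOCandidate())
  //     VD->dump(); // the variable considered for the named return value opt.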
static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate); Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); } const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); } void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); } /// Retrieve the variable that might be used for the named return /// value optimization. /// /// The optimization itself can only be performed if the variable is /// also marked as an NRVO object. const VarDecl *getNRVOCandidate() const { return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>() : nullptr; } /// Set the variable that might be used for the named return value /// optimization. The return statement must have storage for it, /// which is the case if and only if hasNRVOCandidate() is true. void setNRVOCandidate(const VarDecl *Var) { assert(hasNRVOCandidate() && "This return statement has no storage for an NRVO candidate!"); *getTrailingObjects<const VarDecl *>() = Var; } SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; } void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; } SourceLocation getBeginLoc() const { return getReturnLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return RetExpr ? RetExpr->getEndLoc() : getReturnLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr + 1); return child_range(child_iterator(), child_iterator()); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. class AsmStmt : public Stmt { protected: friend class ASTStmtReader; SourceLocation AsmLoc; /// True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; Stmt **Exprs = nullptr; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) {} public: /// Build an empty inline-assembly statement. explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {} SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getBeginLoc() const LLVM_READONLY { return {}; } SourceLocation getEndLoc() const LLVM_READONLY { return {}; } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). 
bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. using inputs_iterator = ExprIterator; using const_inputs_iterator = ConstExprIterator; using inputs_range = llvm::iterator_range<inputs_iterator>; using inputs_const_range = llvm::iterator_range<const_inputs_iterator>; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. using outputs_iterator = ExprIterator; using const_outputs_iterator = ConstExprIterator; using outputs_range = llvm::iterator_range<outputs_iterator>; using outputs_const_range = llvm::iterator_range<const_outputs_iterator>; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. class GCCAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. StringLiteral **Constraints = nullptr; StringLiteral **Clobbers = nullptr; IdentifierInfo **Names = nullptr; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, SourceLocation rparenloc); /// Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {} SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). 
An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. CharSourceRange Range; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin, SourceLocation End) : MyKind(Operand), Str(S), OperandNo(OpNo), Range(CharSourceRange::getCharRange(Begin, End)) {} bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } CharSourceRange getRange() const { assert(isOperand() && "Range is currently used only for Operands."); return Range; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const; }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. This handles canonicalization and /// translation of strings from GCC syntax to LLVM IR syntax, and handles //// flattening of named references like %[foo] to Operand AsmStringPiece's. unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces, const ASTContext &C, unsigned &DiagOffs) const; /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return {}; } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return {}; } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. 
int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. class MSAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks = 0; Token *AsmToks = nullptr; StringRef *Constraints = nullptr; StringRef *Clobbers = nullptr; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// Build an empty MS-style inline-assembly statement. explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {} SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {} public: static SEHExceptStmt* Create(const ASTContext &C, 
SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children, Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {} public: static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getEndLoc(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {} public: static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children, Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// Represents a __leave statement. class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// Build an empty __leave statement. explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {} SourceLocation getLeaveLoc() const { return LeaveLoc; } void setLeaveLoc(SourceLocation L) { LeaveLoc = L; } SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHLeaveStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. 
/// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// The different capture forms: by 'this', by reference, capture for /// variable-length array type etc. enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_ByCopy, VCK_VLAType, }; /// Describes the capture of either a variable, or 'this', or /// variable-length array type. class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: friend class ASTStmtReader; /// Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr); /// Determine the kind of capture. VariableCaptureKind getCaptureKind() const; /// Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// Determine whether this capture handles the C++ 'this' pointer. bool capturesThis() const { return getCaptureKind() == VCK_This; } /// Determine whether this capture handles a variable (by reference). bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// Determine whether this capture handles a variable by copy. bool capturesVariableByCopy() const { return getCaptureKind() == VCK_ByCopy; } /// Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const; }; private: /// The number of variable captured, including 'this'. unsigned NumCaptures; /// The pointer part is the implicit the outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind; /// The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl = nullptr; /// Construct a captured statement. CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// Construct an empty captured statement. CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); } Stmt *const *getStoredStmts() const { return reinterpret_cast<Stmt *const *>(this + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: friend class ASTStmtReader; static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; } /// Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl(); const CapturedDecl *getCapturedDecl() const; /// Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D); /// Retrieve the captured region kind. 
CapturedRegionKind getCapturedRegionKind() const; /// Set the captured region kind. void setCapturedRegionKind(CapturedRegionKind Kind); /// Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// An iterator that walks over the captures. using capture_iterator = Capture *; using const_capture_iterator = const Capture *; using capture_range = llvm::iterator_range<capture_iterator>; using capture_const_range = llvm::iterator_range<const_capture_iterator>; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// Iterator that walks over the capture initialization arguments. using capture_init_iterator = Expr **; using capture_init_range = llvm::iterator_range<capture_init_iterator>; /// Const iterator that walks over the capture initialization /// arguments. using const_capture_init_iterator = Expr *const *; using const_capture_init_range = llvm::iterator_range<const_capture_init_iterator>; capture_init_range capture_inits() { return capture_init_range(capture_init_begin(), capture_init_end()); } const_capture_init_range capture_inits() const { return const_capture_init_range(capture_init_begin(), capture_init_end()); } /// Retrieve the first initialization argument. capture_init_iterator capture_init_begin() { return reinterpret_cast<Expr **>(getStoredStmts()); } const_capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr *const *>(getStoredStmts()); } /// Retrieve the iterator pointing one past the last initialization /// argument. capture_init_iterator capture_init_end() { return capture_init_begin() + NumCaptures; } const_capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getBeginLoc() const LLVM_READONLY { return getCapturedStmt()->getBeginLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getCapturedStmt()->getEndLoc(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); }; } // namespace clang #endif // LLVM_CLANG_AST_STMT_H
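A hedged usage sketch, not part of Stmt.h above: every statement class in this header exposes a children() accessor returning a child_range, and the usual traversal idiom is a range-for over it. The helper below assumes only the public Clang AST headers; the function name countDirectChildren is made up for illustration.

#include "clang/AST/Stmt.h"

// Count the immediate (non-null) sub-statements of S.  Child slots can be
// null on purpose, e.g. a ForStmt with no init or condition expression.
static unsigned countDirectChildren(const clang::Stmt *S) {
  unsigned N = 0;
  for (const clang::Stmt *Child : S->children())
    if (Child)
      ++N;
  return N;
}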
omp-nested-1.c
// { dg-do run } extern void abort(void); #define N 1000 int foo() { int i = 0, j; #pragma omp parallel for num_threads(2) shared (i) for (j = 0; j < N; ++j) { #pragma omp parallel num_threads(1) shared (i) { #pragma omp atomic i++; } } return i; } int main() { if (foo() != N) abort (); return 0; }
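A hedged variant, not part of the test above: the same per-iteration count can be obtained without the nested parallel region or the atomic update by letting OpenMP combine partial sums with a reduction clause. The function name foo_reduction is made up; compile with -fopenmp as usual.

#define N 1000

/* Equivalent to foo() in the test: each of the N iterations adds 1 to i,
   but the partial counts are merged by the reduction instead of an atomic
   increment inside a nested parallel region. */
static int foo_reduction (void)
{
  int i = 0, j;
#pragma omp parallel for num_threads(2) reduction(+:i)
  for (j = 0; j < N; ++j)
    ++i;
  return i;   /* == N */
}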
core_dgemm.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zgemm.c, normal z -> d, Fri Sep 28 17:38:18 2018 * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "core_lapack.h" #include <omp.h> /***************************************************************************//** * * @ingroup core_gemm * * Performs one of the matrix-matrix operations * * \f[ C = \alpha [op( A )\times op( B )] + \beta C, \f] * * where op( X ) is one of: * \f[ op( X ) = X, \f] * \f[ op( X ) = X^T, \f] * \f[ op( X ) = X^T, \f] * * alpha and beta are scalars, and A, B and C are matrices, with op( A ) * an m-by-k matrix, op( B ) a k-by-n matrix and C an m-by-n matrix. * ******************************************************************************* * * @param[in] transa * - PlasmaNoTrans: A is not transposed, * - PlasmaTrans: A is transposed, * - PlasmaConjTrans: A is conjugate transposed. * * @param[in] transb * - PlasmaNoTrans: B is not transposed, * - PlasmaTrans: B is transposed, * - PlasmaConjTrans: B is conjugate transposed. * * @param[in] m * The number of rows of the matrix op( A ) and of the matrix C. * m >= 0. * * @param[in] n * The number of columns of the matrix op( B ) and of the matrix C. * n >= 0. * * @param[in] k * The number of columns of the matrix op( A ) and the number of rows * of the matrix op( B ). k >= 0. * * @param[in] alpha * The scalar alpha. * * @param[in] A * An lda-by-ka matrix, where ka is k when transa = PlasmaNoTrans, * and is m otherwise. * * @param[in] lda * The leading dimension of the array A. * When transa = PlasmaNoTrans, lda >= max(1,m), * otherwise, lda >= max(1,k). * * @param[in] B * An ldb-by-kb matrix, where kb is n when transb = PlasmaNoTrans, * and is k otherwise. * * @param[in] ldb * The leading dimension of the array B. * When transb = PlasmaNoTrans, ldb >= max(1,k), * otherwise, ldb >= max(1,n). * * @param[in] beta * The scalar beta. * * @param[in,out] C * An ldc-by-n matrix. On exit, the array is overwritten by the m-by-n * matrix ( alpha*op( A )*op( B ) + beta*C ). * * @param[in] ldc * The leading dimension of the array C. ldc >= max(1,m). 
* ******************************************************************************/ __attribute__((weak)) void plasma_core_dgemm(plasma_enum_t transa, plasma_enum_t transb, int m, int n, int k, double alpha, const double *A, int lda, const double *B, int ldb, double beta, double *C, int ldc) { cblas_dgemm(CblasColMajor, (CBLAS_TRANSPOSE)transa, (CBLAS_TRANSPOSE)transb, m, n, k, (alpha), A, lda, B, ldb, (beta), C, ldc); } /******************************************************************************/ void plasma_core_omp_dgemm( plasma_enum_t transa, plasma_enum_t transb, int m, int n, int k, double alpha, const double *A, int lda, const double *B, int ldb, double beta, double *C, int ldc, plasma_sequence_t *sequence, plasma_request_t *request) { int ak; if (transa == PlasmaNoTrans) ak = k; else ak = m; int bk; if (transb == PlasmaNoTrans) bk = n; else bk = k; if (sequence->status == PlasmaSuccess) { int transa_ = transa, transb_ = transb; int size_A = lda*ak, size_B = ldb*bk,size_C =ldc*n; #pragma omp target nowait \ depend(in:A[0:lda*ak]) \ depend(in:B[0:ldb*bk]) \ depend(inout:C[0:ldc*n]) \ firstprivate(m,n,k,alpha,beta,lda,ak) \ firstprivate(ldb,bk,ldc,transa_,transb_) \ map(to:A[0:size_A],B[0:size_B]) \ map(tofrom:C[0:size_C]) { int block_size = 4, size_matrix = lda; int m_new = m/block_size; int n_new = n/block_size; int k_new = k/block_size; int zbeta; #pragma omp parallel #pragma omp single { for (int m_ = 0; m_ < m_new; m_++){ for (int n_ = 0; n_ < n_new; n_++) { for (int k_ = 0; k_ < k_new; k_++) { int lda_ = m_ * block_size + k_ * block_size; int ldb_ = k_ * block_size + n_ * block_size; int ldc_ = m_ * block_size + n_ * block_size; #pragma omp task \ depend(inout:C[ldc_]) { double A_new [block_size*block_size],B_new[block_size*block_size],C_new[block_size*block_size]; int i_a = m_, j_a=k_; int i_b = k_, j_b=n_; int i_c = m_, j_c=n_; int insert = 0; for(int l=0;l<block_size;l++) { int before_a = i_a*block_size+j_a*lda*block_size + l*size_matrix; int before_b = i_b*block_size+j_b*lda*block_size + l*size_matrix; int before_c = i_c*block_size+j_c*lda*block_size + l*size_matrix; for (int i = 0; i < block_size; i++) { A_new[insert] = A[before_a]; B_new[insert] = B[before_b]; C_new[insert] = C[before_c]; insert += 1; before_a += 1; before_b += 1; before_c += 1; } } double zbeta = k_== 0 ? beta : 1.0; plasma_core_dgemm(transa_, transb_, block_size, block_size, block_size, alpha, (const double *) A_new, block_size, (const double *) B_new, block_size, zbeta, (double *) C_new, block_size); insert = 0; for(int l=0;l<block_size;l++) { int before_c = i_c*block_size+j_c*lda*block_size + l*size_matrix; for (int i = 0; i < block_size; i++) { C[before_c]=C_new[insert]; insert += 1; before_c += 1; } } } } } } } } } }
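A hedged usage sketch, not part of core_dgemm.c above: calling the sequential kernel plasma_core_dgemm directly on small column-major matrices. It assumes only the prototype and the PlasmaNoTrans constant provided by the same headers that core_dgemm.c itself includes; the matrix values are illustrative.

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include <stdio.h>

int main(void)
{
    /* Column-major 2x2 matrices: A = [1 3; 2 4], B = [5 7; 6 8]. */
    double A[4] = {1, 2, 3, 4};
    double B[4] = {5, 6, 7, 8};
    double C[4] = {0, 0, 0, 0};

    /* C = 1.0*op(A)*op(B) + 0.0*C with no transposition, m = n = k = 2
       and leading dimensions lda = ldb = ldc = 2. */
    plasma_core_dgemm(PlasmaNoTrans, PlasmaNoTrans,
                      2, 2, 2,
                      1.0, A, 2, B, 2,
                      0.0, C, 2);

    /* Expected product: C = [23 31; 34 46]. */
    printf("%g %g\n%g %g\n", C[0], C[2], C[1], C[3]);
    return 0;
}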
c-tree.h
/* Definitions for C parsing and type checking. Copyright (C) 1987-2019 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_C_TREE_H #define GCC_C_TREE_H #include "c-family/c-common.h" #include "diagnostic.h" /* struct lang_identifier is private to c-decl.c, but langhooks.c needs to know how big it is. This is sanity-checked in c-decl.c. */ #define C_SIZEOF_STRUCT_LANG_IDENTIFIER \ (sizeof(struct c_common_identifier) + 3 * sizeof(void*)) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */ #define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE) /* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is volatile. */ #define C_TYPE_FIELDS_VOLATILE(TYPE) TREE_LANG_FLAG_2(TYPE) /* In a RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE nonzero if the definition of the type has already started. */ #define C_TYPE_BEING_DEFINED(TYPE) TYPE_LANG_FLAG_0(TYPE) /* In an incomplete RECORD_TYPE or UNION_TYPE, a list of variable declarations whose type would be completed by completing that type. */ #define C_TYPE_INCOMPLETE_VARS(TYPE) TYPE_VFIELD(TYPE) /* In an IDENTIFIER_NODE, nonzero if this identifier is actually a keyword. C_RID_CODE (node) is then the RID_* value of the keyword. */ #define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_0(ID) /* Record whether a type or decl was written with nonconstant size. Note that TYPE_SIZE may have simplified to a constant. */ #define C_TYPE_VARIABLE_SIZE(TYPE) TYPE_LANG_FLAG_1(TYPE) #define C_DECL_VARIABLE_SIZE(TYPE) DECL_LANG_FLAG_0(TYPE) /* Record whether a type is defined inside a struct or union type. This is used for -Wc++-compat. */ #define C_TYPE_DEFINED_IN_STRUCT(TYPE) TYPE_LANG_FLAG_2(TYPE) /* Record whether an "incomplete type" error was given for the type. */ #define C_TYPE_ERROR_REPORTED(TYPE) TYPE_LANG_FLAG_3(TYPE) /* Record whether a typedef for type `int' was actually `signed int'. */ #define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1(EXP) /* For a FUNCTION_DECL, nonzero if it was defined without an explicit return type. */ #define C_FUNCTION_IMPLICIT_INT(EXP) DECL_LANG_FLAG_1(EXP) /* For a FUNCTION_DECL, nonzero if it was an implicit declaration. */ #define C_DECL_IMPLICIT(EXP) DECL_LANG_FLAG_2(EXP) /* For a PARM_DECL, nonzero if it was declared as an array. */ #define C_ARRAY_PARAMETER(NODE) DECL_LANG_FLAG_0(NODE) /* For FUNCTION_DECLs, evaluates true if the decl is built-in but has been declared. */ #define C_DECL_DECLARED_BUILTIN(EXP) DECL_LANG_FLAG_3(FUNCTION_DECL_CHECK(EXP)) /* For FUNCTION_DECLs, evaluates true if the decl is built-in, has a built-in prototype and does not have a non-built-in prototype. */ #define C_DECL_BUILTIN_PROTOTYPE(EXP) DECL_LANG_FLAG_6(FUNCTION_DECL_CHECK(EXP)) /* Record whether a decl was declared register. This is strictly a front-end flag, whereas DECL_REGISTER is used for code generation; they may differ for structures with volatile fields. 
*/ #define C_DECL_REGISTER(EXP) DECL_LANG_FLAG_4(EXP) /* Record whether a decl was used in an expression anywhere except an unevaluated operand of sizeof / typeof / alignof. This is only used for functions declared static but not defined, though outside sizeof and typeof it is set for other function decls as well. */ #define C_DECL_USED(EXP) DECL_LANG_FLAG_5(FUNCTION_DECL_CHECK(EXP)) /* Record whether a variable has been declared threadprivate by #pragma omp threadprivate. */ #define C_DECL_THREADPRIVATE_P(DECL) DECL_LANG_FLAG_3(VAR_DECL_CHECK(DECL)) /* Set on VAR_DECLs for compound literals. */ #define C_DECL_COMPOUND_LITERAL_P(DECL) DECL_LANG_FLAG_5(VAR_DECL_CHECK(DECL)) /* Nonzero for a decl which either doesn't exist or isn't a prototype. N.B. Could be simplified if all built-in decls had complete prototypes (but this is presently difficult because some of them need FILE*). */ #define C_DECL_ISNT_PROTOTYPE(EXP) \ (EXP == 0 || (!prototype_p(TREE_TYPE(EXP)) && !fndecl_built_in_p(EXP))) /* For FUNCTION_TYPE, a hidden list of types of arguments. The same as TYPE_ARG_TYPES for functions with prototypes, but created for functions without prototypes. */ #define TYPE_ACTUAL_ARG_TYPES(NODE) TYPE_LANG_SLOT_1(NODE) /* For a CONSTRUCTOR, whether some initializer contains a subexpression meaning it is not a constant expression. */ #define CONSTRUCTOR_NON_CONST(EXPR) TREE_LANG_FLAG_1(CONSTRUCTOR_CHECK(EXPR)) /* For a SAVE_EXPR, nonzero if the operand of the SAVE_EXPR has already been folded. */ #define SAVE_EXPR_FOLDED_P(EXP) TREE_LANG_FLAG_1(SAVE_EXPR_CHECK(EXP)) /* Record parser information about an expression that is irrelevant for code generation alongside a tree representing its value. */ struct c_expr { /* The value of the expression. */ tree value; /* Record the original unary/binary operator of an expression, which may have been changed by fold, STRING_CST for unparenthesized string constants, C_MAYBE_CONST_EXPR for __builtin_constant_p calls (even if parenthesized), for subexpressions, and for non-constant initializers, or ERROR_MARK for other expressions (including parenthesized expressions). */ enum tree_code original_code; /* If not NULL, the original type of an expression. This will differ from the type of the value field for an enum constant. The type of an enum constant is a plain integer type, but this field will be the enum type. */ tree original_type; /* The source range of this expression. This is redundant for node values that have locations, but not all node kinds have locations (e.g. constants, and references to params, locals, etc), so we stash a copy here. */ source_range src_range; /* Access to the first and last locations within the source spelling of this expression. */ location_t get_start() const { return src_range.m_start; } location_t get_finish() const { return src_range.m_finish; } location_t get_location() const { if (EXPR_HAS_LOCATION(value)) return EXPR_LOCATION(value); else return make_location(get_start(), get_start(), get_finish()); } /* Set the value to error_mark_node whilst ensuring that src_range is initialized. */ void set_error() { value = error_mark_node; src_range.m_start = UNKNOWN_LOCATION; src_range.m_finish = UNKNOWN_LOCATION; } }; /* Type alias for struct c_expr. This allows to use the structure inside the VEC types. */ typedef struct c_expr c_expr_t; /* A kind of type specifier. Note that this information is currently only used to distinguish tag definitions, tag references and typeof uses. */ enum c_typespec_kind { /* No typespec. 
This appears only in struct c_declspec. */ ctsk_none, /* A reserved keyword type specifier. */ ctsk_resword, /* A reference to a tag, previously declared, such as "struct foo". This includes where the previous declaration was as a different kind of tag, in which case this is only valid if shadowing that tag in an inner scope. */ ctsk_tagref, /* A reference to a tag, not previously declared in a visible scope. */ ctsk_tagfirstref, /* A definition of a tag such as "struct foo { int a; }". */ ctsk_tagdef, /* A typedef name. */ ctsk_typedef, /* An ObjC-specific kind of type specifier. */ ctsk_objc, /* A typeof specifier, or _Atomic ( type-name ). */ ctsk_typeof }; /* A type specifier: this structure is created in the parser and passed to declspecs_add_type only. */ struct c_typespec { /* What kind of type specifier this is. */ enum c_typespec_kind kind; /* Whether the expression has operands suitable for use in constant expressions. */ bool expr_const_operands; /* The specifier itself. */ tree spec; /* An expression to be evaluated before the type specifier, in the case of typeof specifiers, or NULL otherwise or if no such expression is required for a particular typeof specifier. In particular, when typeof is applied to an expression of variably modified type, that expression must be evaluated in order to determine array sizes that form part of the type, but the expression itself (as opposed to the array sizes) forms no part of the type and so needs to be recorded separately. */ tree expr; }; /* A storage class specifier. */ enum c_storage_class { csc_none, csc_auto, csc_extern, csc_register, csc_static, csc_typedef }; /* A type specifier keyword "void", "_Bool", "char", "int", "float", "double", "_Decimal32", "_Decimal64", "_Decimal128", "_Fract", "_Accum", or none of these. */ enum c_typespec_keyword { cts_none, cts_void, cts_bool, cts_char, cts_int, cts_float, cts_int_n, cts_double, cts_dfloat32, cts_dfloat64, cts_dfloat128, cts_floatn_nx, cts_fract, cts_accum, cts_auto_type }; /* This enum lists all the possible declarator specifiers, storage class or attribute that a user can write. There is at least one enumerator per possible declarator specifier in the struct c_declspecs below. It is used to index the array of declspec locations in struct c_declspecs. */ enum c_declspec_word { cdw_typespec /* A catch-all for a typespec. */, cdw_storage_class /* A catch-all for a storage class */, cdw_attributes, cdw_typedef, cdw_explicit_signed, cdw_deprecated, cdw_default_int, cdw_long, cdw_long_long, cdw_short, cdw_signed, cdw_unsigned, cdw_complex, cdw_inline, cdw_noreturn, cdw_thread, cdw_const, cdw_volatile, cdw_restrict, cdw_atomic, cdw_saturating, cdw_alignas, cdw_address_space, cdw_gimple, cdw_rtl, cdw_number_of_elements /* This one must always be the last enumerator. */ }; enum c_declspec_il { cdil_none, cdil_gimple, /* __GIMPLE */ cdil_gimple_cfg, /* __GIMPLE(cfg) */ cdil_gimple_ssa, /* __GIMPLE(ssa) */ cdil_rtl /* __RTL */ }; /* A sequence of declaration specifiers in C. When a new declaration specifier is added, please update the enum c_declspec_word above accordingly. */ struct c_declspecs { location_t locations[cdw_number_of_elements]; /* The type specified, if a single type specifier such as a struct, union or enum specifier, typedef name or typeof specifies the whole type, or NULL_TREE if none or a keyword such as "void" or "char" is used. Does not include qualifiers. */ tree type; /* Any expression to be evaluated before the type, from a typeof specifier. 
*/ tree expr; /* The attributes from a typedef decl. */ tree decl_attr; /* When parsing, the attributes. Outside the parser, this will be NULL; attributes (possibly from multiple lists) will be passed separately. */ tree attrs; /* The pass to start compiling a __GIMPLE or __RTL function with. */ char* gimple_or_rtl_pass; /* The base-2 log of the greatest alignment required by an _Alignas specifier, in bytes, or -1 if no such specifiers with nonzero alignment. */ int align_log; /* For the __intN declspec, this stores the index into the int_n_* arrays. */ int int_n_idx; /* For the _FloatN and _FloatNx declspec, this stores the index into the floatn_nx_types array. */ int floatn_nx_idx; /* The storage class specifier, or csc_none if none. */ enum c_storage_class storage_class; /* Any type specifier keyword used such as "int", not reflecting modifiers such as "short", or cts_none if none. */ ENUM_BITFIELD(c_typespec_keyword) typespec_word : 8; /* The kind of type specifier if one has been seen, ctsk_none otherwise. */ ENUM_BITFIELD(c_typespec_kind) typespec_kind : 3; ENUM_BITFIELD(c_declspec_il) declspec_il : 3; /* Whether any expressions in typeof specifiers may appear in constant expressions. */ BOOL_BITFIELD expr_const_operands : 1; /* Whether any declaration specifiers have been seen at all. */ BOOL_BITFIELD declspecs_seen_p : 1; /* Whether something other than a storage class specifier or attribute has been seen. This is used to warn for the obsolescent usage of storage class specifiers other than at the start of the list. (Doing this properly would require function specifiers to be handled separately from storage class specifiers.) */ BOOL_BITFIELD non_sc_seen_p : 1; /* Whether the type is specified by a typedef or typeof name. */ BOOL_BITFIELD typedef_p : 1; /* Whether the type is explicitly "signed" or specified by a typedef whose type is explicitly "signed". */ BOOL_BITFIELD explicit_signed_p : 1; /* Whether the specifiers include a deprecated typedef. */ BOOL_BITFIELD deprecated_p : 1; /* Whether the type defaulted to "int" because there were no type specifiers. */ BOOL_BITFIELD default_int_p : 1; /* Whether "long" was specified. */ BOOL_BITFIELD long_p : 1; /* Whether "long" was specified more than once. */ BOOL_BITFIELD long_long_p : 1; /* Whether "short" was specified. */ BOOL_BITFIELD short_p : 1; /* Whether "signed" was specified. */ BOOL_BITFIELD signed_p : 1; /* Whether "unsigned" was specified. */ BOOL_BITFIELD unsigned_p : 1; /* Whether "complex" was specified. */ BOOL_BITFIELD complex_p : 1; /* Whether "inline" was specified. */ BOOL_BITFIELD inline_p : 1; /* Whether "_Noreturn" was speciied. */ BOOL_BITFIELD noreturn_p : 1; /* Whether "__thread" or "_Thread_local" was specified. */ BOOL_BITFIELD thread_p : 1; /* Whether "__thread" rather than "_Thread_local" was specified. */ BOOL_BITFIELD thread_gnu_p : 1; /* Whether "const" was specified. */ BOOL_BITFIELD const_p : 1; /* Whether "volatile" was specified. */ BOOL_BITFIELD volatile_p : 1; /* Whether "restrict" was specified. */ BOOL_BITFIELD restrict_p : 1; /* Whether "_Atomic" was specified. */ BOOL_BITFIELD atomic_p : 1; /* Whether "_Sat" was specified. */ BOOL_BITFIELD saturating_p : 1; /* Whether any alignment specifier (even with zero alignment) was specified. */ BOOL_BITFIELD alignas_p : 1; /* The address space that the declaration belongs to. */ addr_space_t address_space; }; /* The various kinds of declarators in C. */ enum c_declarator_kind { /* An identifier. */ cdk_id, /* A function. 
*/ cdk_function, /* An array. */ cdk_array, /* A pointer. */ cdk_pointer, /* Parenthesized declarator with nested attributes. */ cdk_attrs }; struct c_arg_tag { /* The argument name. */ tree id; /* The type of the argument. */ tree type; }; /* Information about the parameters in a function declarator. */ struct c_arg_info { /* A list of parameter decls. */ tree parms; /* A list of structure, union and enum tags defined. */ vec<c_arg_tag, va_gc>* tags; /* A list of argument types to go in the FUNCTION_TYPE. */ tree types; /* A list of non-parameter decls (notably enumeration constants) defined with the parameters. */ tree others; /* A compound expression of VLA sizes from the parameters, or NULL. In a function definition, these are used to ensure that side-effects in sizes of arrays converted to pointers (such as a parameter int i[n++]) take place; otherwise, they are ignored. */ tree pending_sizes; /* True when these arguments had [*]. */ BOOL_BITFIELD had_vla_unspec : 1; }; /* A declarator. */ struct c_declarator { /* The kind of declarator. */ enum c_declarator_kind kind; location_t id_loc; /* Currently only set for cdk_id, cdk_array. */ /* Except for cdk_id, the contained declarator. For cdk_id, NULL. */ struct c_declarator* declarator; union { /* For identifiers, an IDENTIFIER_NODE or NULL_TREE if an abstract declarator. */ tree id; /* For functions. */ struct c_arg_info* arg_info; /* For arrays. */ struct { /* The array dimension, or NULL for [] and [*]. */ tree dimen; /* The qualifiers inside []. */ int quals; /* The attributes (currently ignored) inside []. */ tree attrs; /* Whether [static] was used. */ BOOL_BITFIELD static_p : 1; /* Whether [*] was used. */ BOOL_BITFIELD vla_unspec_p : 1; } array; /* For pointers, the qualifiers on the pointer type. */ int pointer_quals; /* For attributes. */ tree attrs; } u; }; /* A type name. */ struct c_type_name { /* The declaration specifiers. */ struct c_declspecs* specs; /* The declarator. */ struct c_declarator* declarator; }; /* A parameter. */ struct c_parm { /* The declaration specifiers, minus any prefix attributes. */ struct c_declspecs* specs; /* The attributes. */ tree attrs; /* The declarator. */ struct c_declarator* declarator; /* The location of the parameter. */ location_t loc; }; /* Used when parsing an enum. Initialized by start_enum. */ struct c_enum_contents { /* While defining an enum type, this is 1 plus the last enumerator constant value. */ tree enum_next_value; /* Nonzero means that there was overflow computing enum_next_value. */ int enum_overflow; }; /* A type of reference to a static identifier in an inline function. */ enum c_inline_static_type { /* Identifier with internal linkage used in function that may be an inline definition (i.e., file-scope static). */ csi_internal, /* Modifiable object with static storage duration defined in function that may be an inline definition (i.e., local static). 
*/ csi_modifiable }; /* in c-parser.c */ extern void c_parse_init(void); extern bool c_keyword_starts_typename(enum rid keyword); /* in c-aux-info.c */ extern void gen_aux_info_record(tree, int, int, int); /* in c-decl.c */ struct c_spot_bindings; struct c_struct_parse_info; extern struct obstack parser_obstack; extern tree c_break_label; extern tree c_cont_label; extern bool global_bindings_p(void); extern tree pushdecl(tree); extern void push_scope(void); extern tree pop_scope(void); extern void c_bindings_start_stmt_expr(struct c_spot_bindings*); extern void c_bindings_end_stmt_expr(struct c_spot_bindings*); extern void record_inline_static(location_t, tree, tree, enum c_inline_static_type); extern void c_init_decl_processing(void); extern void c_print_identifier(FILE*, tree, int); extern int quals_from_declspecs(const struct c_declspecs*); extern struct c_declarator* build_array_declarator(location_t, tree, struct c_declspecs*, bool, bool); extern tree build_enumerator(location_t, location_t, struct c_enum_contents*, tree, tree); extern tree check_for_loop_decls(location_t, bool); extern void mark_forward_parm_decls(void); extern void declare_parm_level(void); extern void undeclared_variable(location_t, tree); extern tree lookup_label_for_goto(location_t, tree); extern tree declare_label(tree); extern tree define_label(location_t, tree); extern struct c_spot_bindings* c_get_switch_bindings(void); extern void c_release_switch_bindings(struct c_spot_bindings*); extern bool c_check_switch_jump_warnings(struct c_spot_bindings*, location_t, location_t); extern void finish_decl(tree, location_t, tree, tree, tree); extern tree finish_enum(tree, tree, tree); extern void finish_function(void); extern tree finish_struct(location_t, tree, tree, tree, struct c_struct_parse_info*); extern struct c_arg_info* build_arg_info(void); extern struct c_arg_info* get_parm_info(bool, tree); extern tree grokfield(location_t, struct c_declarator*, struct c_declspecs*, tree, tree*); extern tree groktypename(struct c_type_name*, tree*, bool*); extern tree grokparm(const struct c_parm*, tree*); extern tree implicitly_declare(location_t, tree); extern void keep_next_level(void); extern void pending_xref_error(void); extern void c_push_function_context(void); extern void c_pop_function_context(void); extern void push_parm_decl(const struct c_parm*, tree*); extern struct c_declarator* set_array_declarator_inner(struct c_declarator*, struct c_declarator*); extern tree c_builtin_function(tree); extern tree c_builtin_function_ext_scope(tree); extern void shadow_tag(const struct c_declspecs*); extern void shadow_tag_warned(const struct c_declspecs*, int); extern tree start_enum(location_t, struct c_enum_contents*, tree); extern bool start_function(struct c_declspecs*, struct c_declarator*, tree); extern tree start_decl(struct c_declarator*, struct c_declspecs*, bool, tree); extern tree start_struct(location_t, enum tree_code, tree, struct c_struct_parse_info**); extern void store_parm_decls(void); extern void store_parm_decls_from(struct c_arg_info*); extern void temp_store_parm_decls(tree, tree); extern void temp_pop_parm_decls(void); extern tree xref_tag(enum tree_code, tree); extern struct c_typespec parser_xref_tag(location_t, enum tree_code, tree); extern struct c_parm* build_c_parm(struct c_declspecs*, tree, struct c_declarator*, location_t); extern struct c_declarator* build_attrs_declarator(tree, struct c_declarator*); extern struct c_declarator* build_function_declarator(struct c_arg_info*, struct 
c_declarator*); extern struct c_declarator* build_id_declarator(tree); extern struct c_declarator* make_pointer_declarator(struct c_declspecs*, struct c_declarator*); extern struct c_declspecs* build_null_declspecs(void); extern struct c_declspecs* declspecs_add_qual(location_t, struct c_declspecs*, tree); extern struct c_declspecs* declspecs_add_type(location_t, struct c_declspecs*, struct c_typespec); extern struct c_declspecs* declspecs_add_scspec(location_t, struct c_declspecs*, tree); extern struct c_declspecs* declspecs_add_attrs(location_t, struct c_declspecs*, tree); extern struct c_declspecs* declspecs_add_addrspace(location_t, struct c_declspecs*, addr_space_t); extern struct c_declspecs* declspecs_add_alignas(location_t, struct c_declspecs*, tree); extern struct c_declspecs* finish_declspecs(struct c_declspecs*); /* in c-objc-common.c */ extern bool c_objc_common_init(void); extern bool c_missing_noreturn_ok_p(tree); extern bool c_warn_unused_global_decl(const_tree); extern void c_initialize_diagnostics(diagnostic_context*); extern bool c_vla_unspec_p(tree x, tree fn); extern alias_set_type c_get_alias_set(tree); /* in c-typeck.c */ extern int in_alignof; extern int in_sizeof; extern int in_typeof; extern tree c_last_sizeof_arg; extern location_t c_last_sizeof_loc; extern struct c_switch* c_switch_stack; extern tree c_objc_common_truthvalue_conversion(location_t, tree); extern tree require_complete_type(location_t, tree); extern bool same_translation_unit_p(const_tree, const_tree); extern int comptypes(tree, tree); extern int comptypes_check_different_types(tree, tree, bool*); extern bool c_vla_type_p(const_tree); extern bool c_mark_addressable(tree, bool = false); extern void c_incomplete_type_error(location_t, const_tree, const_tree); extern tree c_type_promotes_to(tree); extern struct c_expr default_function_array_conversion(location_t, struct c_expr); extern struct c_expr default_function_array_read_conversion(location_t, struct c_expr); extern struct c_expr convert_lvalue_to_rvalue(location_t, struct c_expr, bool, bool); extern tree decl_constant_value_1(tree, bool); extern void mark_exp_read(tree); extern tree composite_type(tree, tree); extern tree build_component_ref(location_t, tree, tree, location_t); extern tree build_array_ref(location_t, tree, tree); extern tree build_external_ref(location_t, tree, bool, tree*); extern void pop_maybe_used(bool); extern struct c_expr c_expr_sizeof_expr(location_t, struct c_expr); extern struct c_expr c_expr_sizeof_type(location_t, struct c_type_name*); extern struct c_expr parser_build_unary_op(location_t, enum tree_code, struct c_expr); extern struct c_expr parser_build_binary_op(location_t, enum tree_code, struct c_expr, struct c_expr); extern tree build_conditional_expr(location_t, tree, bool, tree, tree, location_t, tree, tree, location_t); extern tree build_compound_expr(location_t, tree, tree); extern tree c_cast_expr(location_t, struct c_type_name*, tree); extern tree build_c_cast(location_t, tree, tree); extern void store_init_value(location_t, tree, tree, tree); extern void maybe_warn_string_init(location_t, tree, struct c_expr); extern void start_init(tree, tree, int, rich_location*); extern void finish_init(void); extern void really_start_incremental_init(tree); extern void finish_implicit_inits(location_t, struct obstack*); extern void push_init_level(location_t, int, struct obstack*); extern struct c_expr pop_init_level(location_t, int, struct obstack*, location_t); extern void set_init_index(location_t, tree, tree, 
struct obstack*); extern void set_init_label(location_t, tree, location_t, struct obstack*); extern void process_init_element(location_t, struct c_expr, bool, struct obstack*); extern tree build_compound_literal(location_t, tree, tree, bool, unsigned int); extern void check_compound_literal_type(location_t, struct c_type_name*); extern tree c_start_case(location_t, location_t, tree, bool); extern void c_finish_case(tree, tree); extern tree build_asm_expr(location_t, tree, tree, tree, tree, tree, bool, bool); extern tree build_asm_stmt(bool, tree); extern int c_types_compatible_p(tree, tree); extern tree c_begin_compound_stmt(bool); extern tree c_end_compound_stmt(location_t, tree, bool); extern void c_finish_if_stmt(location_t, tree, tree, tree); extern void c_finish_loop(location_t, tree, tree, tree, tree, tree, bool); extern tree c_begin_stmt_expr(void); extern tree c_finish_stmt_expr(location_t, tree); extern tree c_process_expr_stmt(location_t, tree); extern tree c_finish_expr_stmt(location_t, tree); extern tree c_finish_return(location_t, tree, tree); extern tree c_finish_bc_stmt(location_t, tree*, bool); extern tree c_finish_goto_label(location_t, tree); extern tree c_finish_goto_ptr(location_t, tree); extern tree c_expr_to_decl(tree, bool*, bool*); extern tree c_finish_omp_construct(location_t, enum tree_code, tree, tree); extern tree c_finish_oacc_data(location_t, tree, tree); extern tree c_finish_oacc_host_data(location_t, tree, tree); extern tree c_begin_omp_parallel(void); extern tree c_finish_omp_parallel(location_t, tree, tree); extern tree c_begin_omp_task(void); extern tree c_finish_omp_task(location_t, tree, tree); extern void c_finish_omp_cancel(location_t, tree); extern void c_finish_omp_cancellation_point(location_t, tree); extern tree c_finish_omp_clauses(tree, enum c_omp_region_type); extern tree c_build_va_arg(location_t, tree, location_t, tree); extern tree c_finish_transaction(location_t, tree, int); extern bool c_tree_equal(tree, tree); extern tree c_build_function_call_vec(location_t, vec<location_t>, tree, vec<tree, va_gc>*, vec<tree, va_gc>*); extern tree c_omp_clause_copy_ctor(tree, tree, tree); /* Set to 0 at beginning of a function definition, set to 1 if a return statement that specifies a return value is seen. */ extern int current_function_returns_value; /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. */ extern int current_function_returns_null; /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. */ extern int current_function_returns_abnormally; /* In c-decl.c */ /* Tell the binding oracle what kind of binding we are looking for. */ enum c_oracle_request { C_ORACLE_SYMBOL, C_ORACLE_TAG, C_ORACLE_LABEL }; /* If this is non-NULL, then it is a "binding oracle" which can lazily create bindings when needed by the C compiler. The oracle is told the name and type of the binding to create. It can call pushdecl or the like to ensure the binding is visible; or do nothing, leaving the binding untouched. c-decl.c takes note of when the oracle has been called and will not call it again if it fails to create a given binding. 
*/ typedef void c_binding_oracle_function(enum c_oracle_request, tree identifier); extern c_binding_oracle_function* c_binding_oracle; extern void c_finish_incomplete_decl(tree); extern tree c_omp_reduction_id(enum tree_code, tree); extern tree c_omp_reduction_decl(tree); extern tree c_omp_reduction_lookup(tree, tree); extern tree c_check_omp_declare_reduction_r(tree*, int*, void*); extern void c_pushtag(location_t, tree, tree); extern void c_bind(location_t, tree, bool); extern bool tag_exists_p(enum tree_code, tree); /* In c-errors.c */ extern bool pedwarn_c90(location_t, int opt, const char*, ...) ATTRIBUTE_GCC_DIAG(3, 4); extern bool pedwarn_c99(location_t, int opt, const char*, ...) ATTRIBUTE_GCC_DIAG(3, 4); extern bool pedwarn_c11(location_t, int opt, const char*, ...) ATTRIBUTE_GCC_DIAG(3, 4); extern void set_c_expr_source_range(c_expr* expr, location_t start, location_t finish); extern void set_c_expr_source_range(c_expr* expr, source_range src_range); /* In c-fold.c */ extern vec<tree> incomplete_record_decls; #if CHECKING_P namespace selftest { extern void run_c_tests(void); } // namespace selftest #endif /* #if CHECKING_P */ #endif /* ! GCC_C_TREE_H */
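A hedged illustration, not part of c-tree.h above: the enum_next_value field documented in struct c_enum_contents tracks the standard C rule that an enumerator without an explicit value is one greater than the preceding enumerator. The small program below only demonstrates that language rule; the enum and its member names are made up.

#include <stdio.h>

enum color { RED = 5, GREEN, BLUE = 10, VIOLET };

int main(void)
{
    /* GREEN implicitly follows RED, VIOLET implicitly follows BLUE. */
    printf("%d %d %d %d\n", RED, GREEN, BLUE, VIOLET);   /* prints: 5 6 10 11 */
    return 0;
}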
tammes.c
#include <string.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #ifndef M_PI # define M_PI 3.141592653589793 #endif #define GA 2.39996322972865332 int frame = 0; int asymmetric = 1; int N; double minD = 0; double bestMinD = 0; typedef struct { double x,y,z; } vec3; vec3* pos; vec3* v; vec3* best; double urandom() { return 2. * rand() / (RAND_MAX+1.) - 1.; } double norm(vec3* v) { return sqrt(v->x*v->x+v->y*v->y+v->z*v->z); } double norm2(vec3* v) { return v->x*v->x+v->y*v->y+v->z*v->z; } void normalize(vec3* v) { double n = norm(v); if (n == 0.) { v->x = 1; v->y = 0; v->z = 0; } else { v->x /= n; v->y /= n; v->z /= n; } } double distance(vec3* v1,vec3* v2) { double dx = v1->x-v2->x; double dy = v1->y-v2->y; double dz = v1->z-v2->z; return sqrt(dx*dx+dy*dy+dz*dz); } double distanceSq(vec3* v1,vec3* v2) { double dx = v1->x-v2->x; double dy = v1->y-v2->y; double dz = v1->z-v2->z; return dx*dx+dy*dy+dz*dz; } void *safemalloc(long n) { void* p = malloc(n); if (p == NULL) { fprintf(stderr, "Out of memory (requested %ld).\n", n); exit(3); } return p; } vec3 add(vec3 a, vec3 b) { vec3 c; c.x = a.x + b.x; c.y = a.y + b.y; c.z = a.z + b.z; return c; } vec3 sub(vec3 a, vec3 b) { vec3 c; c.x = a.x - b.x; c.y = a.y - b.y; c.z = a.z - b.z; return c; } vec3 scale(double a, vec3 b) { vec3 c; c.x = a * b.x; c.y = a * b.y; c.z = a * b.z; return c; } vec3 cross(vec3 a, vec3 b) { vec3 c; c.x = a.y * b.z - a.z * b.y; c.y = a.z * b.x - a.x * b.z; c.z = a.x * b.y - a.y * b.x; return c; } // returns A if the points are not on a triangle vec3 circumcenter(vec3 A, vec3 B, vec3 C) { // https://en.wikipedia.org/wiki/Circumscribed_circle vec3 a = sub(A,C); vec3 b = sub(B,C); vec3 ab = cross(a,b); vec3 top = cross(sub(scale(norm2(&a), b), scale(norm2(&b),a)), ab); double n2 = norm2(&ab); if (n2 == 0) { return A; } else { return add(C, scale(0.5/norm2(&ab), top)); } } void closest(vec3* pos, int i, int n, double* d, int* index) { int j; for (j=0; j<n; j++) { d[j] = 3; index[j] = -1; } for (j=0; j<N; j++) { double dist; if (j != i) { dist = distance(&pos[j], &pos[i]); int k; if (dist < d[n-1]) { k = n-1; while (k > 0 && dist < d[k-1]) { k--; } int l; for (l=n-1 ; l > k ; l--) { d[l] = d[l-1]; index[l] = index[l-1]; } d[k] = dist; index[k] = j; } } } } void calculateMinD(void) { double minD2 = 4; int i; int N0 = (N % 2 || asymmetric) ? N : N/2; for (i=0;i<N0;i++) { double d2; int j; for (j=i+1;j<N;j++) { d2 = distanceSq(&pos[i],&pos[j]); if (d2 < minD2) minD2 = d2; } } minD = sqrt(minD2); } double maxMinD(vec3* pos) { double maxMinD2 = -1; int i; // int N0 = (N % 2) ? N : N/2; for (i=0;i<N;i++) { double minD2; double d2; int j; minD2 = 4; for (j=0;j<N;j++) { if (j!=i) { d2 = distanceSq(&pos[i],&pos[j]); if (d2 < minD2) minD2 = d2; } } if (minD2 > maxMinD2) { maxMinD2 = minD2; } } return sqrt(maxMinD2); } void update(double approxDx,double p,double minus,double friction) { int i; int j; double maxV = 0; double thisV; int N0 = (N % 2 || asymmetric) ? N : N/2; vec3* newV = safemalloc(sizeof(vec3) * N0); for (i=0;i<N0;i++) { thisV = norm(&v[i]); if (thisV > maxV) maxV = thisV; } if (maxV == 0) maxV = 10; double dt = approxDx / maxV; if (dt * friction > 0.75) dt = 0.75 / friction; #pragma omp parallel #pragma omp for private(i,j) for (i=0;i<N0;i++) { double d; double factor; newV[i].x = v[i].x * (1. - dt * friction); newV[i].y = v[i].y * (1. - dt * friction); newV[i].z = v[i].z * (1. 
- dt * friction); for (j=0;j<N;j++) { if (i==j) continue; d = distance(&pos[i],&pos[j]); if (d == 0) { newV[i].x += urandom() * 0.001 * dt; newV[i].y += urandom() * 0.001 * dt; newV[i].z += urandom() * 0.001 * dt; fprintf(stderr, "Collision %d %d: %.9f %.9f %.9f\n", i,j,v[i].x,v[i].y,v[i].z); continue; } factor = dt / pow(d-minus,p+1); newV[i].x += factor * (pos[i].x - pos[j].x); newV[i].y += factor * (pos[i].y - pos[j].y); newV[i].z += factor * (pos[i].z - pos[j].z); } } double n; for (i=0;i<N0;i++) { // Euler-Cromer pos[i].x += dt * newV[i].x / 2.; pos[i].y += dt * newV[i].y / 2.; pos[i].z += dt * newV[i].z / 2.; n = norm(&pos[i]); if (n==0) { pos[i].x=1; pos[i].y=0; pos[i].z=0; } else { pos[i].x /= n; pos[i].y /= n; pos[i].z /= n; v[i] = newV[i]; } if (N0 < N) { pos[N0+i].x = -pos[i].x; pos[N0+i].y = -pos[i].y; pos[N0+i].z = -pos[i].z; } } calculateMinD(); free(newV); } double latitude(vec3* v) { return 180. / M_PI * atan2(v->z, sqrt(v->x*v->x+v->y*v->y)); } double longitude(vec3* v) { if (v->x == 0 && v->y == 0) return 0; return 180. / M_PI * atan2(v->x, v->y); } void usage(void) { fprintf(stderr, "tammes [-animate | -scad] [-repeat repeatCount] [-friction frictionMultiplier] nPoints [nIterations]\n"); } void dumpFrame(vec3* positions, double minD) { int i; printf("minD %.9f\n", minD); for(i=0;i<N;i++) printf("pos %d %.9f %.9f %.9f\n", i, positions[i].x, positions[i].y, positions[i].z); printf("frame %d\n", frame++); fflush(stdout); } double minDAmongNeighbors(vec3* pos, vec3* base, int* neighbors) { int j; double closestD = 3; for (j=0; j<6; j++) { double d = distance(base, &pos[neighbors[j]]); if (d<closestD) closestD = d; } return closestD; } void cleanupFrom(vec3* from, vec3* pos, int i, int* neighbors, double eps, double maxMove) { vec3 move; move.x = pos[i].x - from->x; move.y = pos[i].y - from->y; move.z = pos[i].z - from->z; normalize(&move); double a; double myMinD = minDAmongNeighbors(pos, &pos[i], neighbors); vec3 adjusted; vec3 base = pos[i]; for (a = eps; a <= maxMove; a += eps) { adjusted.x = base.x + a * move.x; adjusted.y = base.y + a * move.y; adjusted.z = base.z + a * move.z; normalize(&adjusted); double adjMinD = minDAmongNeighbors(pos, &adjusted, neighbors); if (adjMinD < myMinD) break; myMinD = adjMinD; pos[i] = adjusted; } } double closestPointOtherThan(vec3* base, vec3* pos, int omit) { double bestD2 = 3; int i; for (i=0; i<N; i++) { if (i != omit) { double d2 = distanceSq(base, &pos[i]); if (d2 < bestD2) { bestD2 = d2; } } } return sqrt(bestD2); } // A bit of greedy local optimization where we try the circumcenters of all triangles // formed by the six closest neighbors of a given point to see if we can make the // given point do better. 
void cleanupPointCircumcenter(vec3* pos, int i) { double d[6]; int index[6]; closest(pos, i, 6, d, index); vec3 bestPoint = pos[i]; double bestD = 0; double dist; int a,b,c; for (a = 0; a < 6-2; a++) for(b = a+1; b < 6-1 ; b++) for (c = b+1 ; c < 6 ; c++) { vec3 circ = circumcenter(pos[index[a]], pos[index[b]], pos[index[c]]); normalize(&circ); dist = closestPointOtherThan(&circ, pos, i); if (dist > bestD) { bestPoint = circ; bestD = dist; } } pos[i] = bestPoint; } void cleanup(vec3* pos) { if (N < 7) return; int i; for (i=0; i<N; i++) cleanupPointCircumcenter(pos, i); } int main(int argc, char** argv) { int nIter = 500; int repeats = 1; int animation = 0; int scad = 0; int golden = 0; double frictionMultiplier = 0.16; vec3* bestInRun; double bestMinDInRun = 0; while (argc >= 2 && argv[1][0] == '-') { switch(argv[1][1]) { case 'g': golden = 1; break; case 'a': animation = 1; break; case 's': scad = 1; break; case 'r': if (argc < 3) { usage(); return 1; } repeats = atoi(argv[2]); argc--; argv++; break; case 'f': if (argc < 3) { usage(); return 1; } frictionMultiplier = atof(argv[2]); argc--; argv++; break; default: break; } argc--; argv++; } if (argc < 2) { usage(); return 1; } if (golden) asymmetric = 1; N = atoi(argv[1]); if (argc >= 3) { nIter = atoi(argv[2]); if (argc >= 4) frictionMultiplier = atof(argv[3]); } pos = safemalloc(sizeof(vec3)*N); // we waste a bit of memory when N is even, but memory is cheap v = safemalloc(sizeof(vec3)*N); best = safemalloc(sizeof(vec3)*N); bestInRun = safemalloc(sizeof(vec3)*N); srand((unsigned int)time(0)); vec3 origin; origin.x = origin.y = origin.z = 0.; int i; // impose antipodal symmetry (or almost if N is odd), using idea of https://math.mit.edu/research/highschool/rsi/documents/2012Gautam.pdf int N0 = asymmetric ? 
N : (N+1)/2; int r; for (r=0;r<repeats;r++) { if (golden) { for (i=0;i<N;i++) { double ratio = (double)(i+1)/(N+1); pos[i].x = 2 * sqrt( (1-ratio) * ratio ) * cos(i * GA); pos[i].y = 2 * sqrt( (1-ratio) * ratio ) * sin(i * GA); pos[i].z = 1-2*ratio; v[i].x = 0; v[i].y = 0; v[i].z = 0; } } else { for (i=0;i<N0;i++) { double n; do { pos[i].x = urandom(); pos[i].y = urandom(); pos[i].z = urandom(); n = norm(&pos[i]); } while (n > 1 || n == 0.); pos[i].x /= n; pos[i].y /= n; pos[i].z /= n; v[i].x = 0; v[i].y = 0; v[i].z = 0; if (N0+i < N) { pos[N0+i].x = -pos[i].x; pos[N0+i].y = -pos[i].y; pos[N0+i].z = -pos[i].z; v[N0+i].x = 0; v[N0+i].y = 0; v[N0+i].z = 0; } } } double nextShow = 0; calculateMinD(); if (minD > bestMinD) { for (i=0;i<N;i++) best[i] = pos[i]; bestMinD = minD; } for (i=0;i<N;i++) bestInRun[i] = pos[i]; bestMinDInRun = minD; if (animation) { printf("n %d\n",N); dumpFrame(pos,minD); } for (i=0;i<nIter;i++) { double p = 1+i*(8.-1)/nIter; if (p>4.5) p=4.5; // 7,4.5,3,10,0 : 0.153 double minus; if (p >= 1) { minus = 0.9 * minD * i / nIter; } else { minus = 0; } update(.3*minD+0.00000001, p, minus, frictionMultiplier*N); // 0.0005/N,p); if (minD > bestMinD) { int j; for(j=0;j<N;j++) { best[j] = pos[j]; } bestMinD = minD; } if (minD > bestMinDInRun) { int j; for (j=0;j<N;j++) bestInRun[j] = pos[j]; bestMinDInRun = minD; } if ((double)i/(nIter-1) >= nextShow || i == nIter-1) { fprintf(stderr, "%.0f%% minD=%.5f maxMinD=%.5f bestD=%.5f bestMaxMinD=%.5f p=%.5f \r", 100.*i/(nIter-1), minD, maxMinD(pos), bestMinD, maxMinD(best), p); nextShow += 0.05; } if (animation) dumpFrame(pos,minD); } fprintf(stderr, "\n"); if (N >= 7) { for (i=0;i<N;i++) pos[i] = bestInRun[i]; for (i=0;i<70;i++) { cleanup(pos); calculateMinD(); if (animation) dumpFrame(pos,minD); fprintf(stderr, "clean(%d) minD=%.5f maxMinD=%.5f \r", i, minD, maxMinD(pos)); } if (minD > bestMinD) { bestMinD = minD; for (i=0;i<N;i++) best[i] = pos[i]; } fprintf(stderr, "\n"); } } fprintf(stderr, "best: minD=%.5f maxMinD=%.5f\n", bestMinD, maxMinD(best)); if (animation) dumpFrame(best,bestMinD); else if (scad) { puts( "diameter=42.7;\n" "split=false;\n" "horizontalTolerance = 0.3; // for joining halves\n" "verticalTolerance = 1.2;\n" "chamfer = 0.5;\n"); printf("n=%d;\n" "minD=%.9f;\n\n", N, bestMinD); //printf("bumpR = 2*sin((1/2)*asin(minD/2));\n"); printf("points = ["); for(i=0;i<N;i++) { printf("[%.9f,%.9f]", latitude(&best[i]), longitude(&best[i])); if (i+1 < N) putchar(','); } puts ("];\n\n" "module dimple() {\n" " translate([0,0,1]) sphere(d=minD,$fn=12);\n" "}\n\n" "module dimples() {\n" " union() {\n" " for(i=[0:len(points)-1]) rotate([0,0,points[i][1]]) rotate([90-points[i][0],0,0]) dimple();\n" " }\n" "}\n\n" "module golfball() {\n" " render(convexity=2)\n" " scale(diameter/2)\n" " difference() { sphere(r=1,$fn=36); dimples();}\n" "}\n\n" "module half(upper=true) {\n" " d1 = diameter*.4+2*horizontalTolerance;\n" " render(convexity=1) difference() {\n" " rotate([upper?0:180,0,0]) golfball();\n" " translate([0,0,-diameter]) cube(size=diameter*2,center=true);\n" " translate([0,0,-diameter*.25-verticalTolerance])\n" " cylinder(d=d1, h=diameter*.5+2*verticalTolerance, $fn=40);\n" " translate([0,0,-0.001])\n" " cylinder(d1=d1+2*chamfer, d2=d1, h=chamfer);\n" " }\n" "}\n\n" "module halves() {\n" " half();\n" " translate([-8-diameter,0,0]) half();\n" " translate([10+diameter/2,0,0])\n" " render(convexity=0)\n" " intersection() {\n" " cylinder(d=diameter*.4,h=diameter*.5, $fn=40);\n" " 
cylinder(d1=diameter*.4-2*chamfer+2*diameter*.5,d2=diameter*.4-2*chamfer, h=diameter*.5, $fn=40);\n" " }\n" "}\n\n" "if (split) halves(); else golfball();\n" ); } else { for(i=0;i<N;i++) { printf("%.9f %.9f %.9f\n", best[i].x, best[i].y, best[i].z); } } free(v); free(bestInRun); free(best); free(pos); return 0; }
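/* Separate sanity-check sketch, not part of tammes.c proper: it exercises the
   circumcenter() formula used above (center = C + ((|a|^2 b - |b|^2 a) x (a x b))
   / (2 |a x b|^2) with a = A - C, b = B - C, per the cited Wikipedia page).
   It reuses vec3, circumcenter() and distance() from the file above and is
   assumed to be compiled together with it.  For an equilateral triangle
   inscribed in the unit circle of the z = 0 plane the circumcenter is the
   origin and the circumradius is 1. */
#include <assert.h>
#include <math.h>
static void check_circumcenter(void) {
    vec3 A = {  1.0,  0.0,             0.0 };
    vec3 B = { -0.5,  sqrt(3.0) / 2.0, 0.0 };
    vec3 C = { -0.5, -sqrt(3.0) / 2.0, 0.0 };
    vec3 cc = circumcenter(A, B, C);
    /* circumcenter should be (numerically) the origin ... */
    assert(fabs(cc.x) < 1e-9 && fabs(cc.y) < 1e-9 && fabs(cc.z) < 1e-9);
    /* ... and all three vertices should be equidistant from it */
    assert(fabs(distance(&cc, &A) - 1.0) < 1e-9);
    assert(fabs(distance(&cc, &B) - 1.0) < 1e-9);
    assert(fabs(distance(&cc, &C) - 1.0) < 1e-9);
}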
weights.c
#include <stdio.h> #include <stdint.h> #include <omp.h> #include <math.h> #include "geometry.h" #include "allocator.h" #include "mesh2geo.h" static inline void compute_terms( const struct geometry *g, const double *w, struct xyz *terms0, struct xyz *terms1) { const uint32_t *ie = g->s->i; const uint32_t *part = g->n->part; const uint32_t *n0 = g->e->eptr->n0; const uint32_t *n1 = g->e->eptr->n1; const double *x0 = g->n->xyz->x0; const double *x1 = g->n->xyz->x1; const double *x2 = g->n->xyz->x2; #pragma omp parallel { const uint32_t t = (uint32_t) omp_get_thread_num(); const uint32_t ie0 = ie[t]; const uint32_t ie1 = ie[t+1]; uint32_t i; for(i = ie0; i < ie1; i++) { const uint32_t node0 = n0[i]; const uint32_t node1 = n1[i]; const double coordx0 = x0[node0]; const double coordy0 = x1[node0]; const double coordz0 = x2[node0]; const double coordx1 = x0[node1]; const double coordy1 = x1[node1]; const double coordz1 = x2[node1]; const double dx = coordx1 - coordx0; const double dy = coordy1 - coordy0; const double dz = coordz1 - coordz0; double c0; double c1; double termx; double termy; double termz; if(part[node0] == t) { c0 = - dx * w[node0 * 7 + 1] + dy; c1 = - dx * w[node0 * 7 + 2] + dz; c1 = - w[node0 * 7 + 4] * c0 + c1; termx = w[node0 * 7 + 3] * w[node0 * 7 + 1] * c0; termx = dx * w[node0 * 7 + 0] - termx; termx += w[node0 * 7 + 6] * c1; termy = w[node0 * 7 + 4] * w[node0 * 7 + 5] * c1; termy = w[node0 * 7 + 3] * c0 - termy; termz = w[node0 * 7 + 5] * c1; terms0->x0[i] = termx; terms0->x1[i] = termy; terms0->x2[i] = termz; } if(part[node1] == t) { c0 = dx * w[node1 * 7 + 1] - dy; c1 = dx * w[node1 * 7 + 2] - dz; c1 = - w[node1 * 7 + 4] * c0 + c1; termx = w[node1 * 7 + 3] * w[node1 * 7 + 1] * c0; termx = -dx * w[node1 * 7 + 0] - termx; termx += w[node1 * 7 + 6] * c1; termy = w[node1 * 7 + 4] * w[node1 * 7 + 5] * c1; termy = w[node1 * 7 + 3] * c0 - termy; termz = w[node1 * 7 + 5] * c1; terms1->x0[i] = termx; terms1->x1[i] = termy; terms1->x2[i] = termz; } } } } /* Do w22 */ static inline void w2alloc(const struct geometry *g, double *w) { const uint32_t *ie = g->s->i; const uint32_t *part = g->n->part; const uint32_t *n0 = g->e->eptr->n0; const uint32_t *n1 = g->e->eptr->n1; const double *x0 = g->n->xyz->x0; const double *x1 = g->n->xyz->x1; const double *x2 = g->n->xyz->x2; #pragma omp parallel { const uint32_t t = (uint32_t) omp_get_thread_num(); const uint32_t ie0 = ie[t]; const uint32_t ie1 = ie[t+1]; uint32_t i; for(i = ie0; i < ie1; i++) { const uint32_t node0 = n0[i]; const uint32_t node1 = n1[i]; const double coordx0 = x0[node0]; const double coordy0 = x1[node0]; const double coordz0 = x2[node0]; const double coordx1 = x0[node1]; const double coordy1 = x1[node1]; const double coordz1 = x2[node1]; const double dx = coordx1 - coordx0; const double dy = coordy1 - coordy0; const double dz = coordz1 - coordz0; if(part[node0] == t) { const double d0 = w[node0 * 7 + 1] / w[node0 * 7 + 0]; const double d0_ = dy - dx * d0; const double d1 = w[node0 * 7 + 2] / w[node0 * 7 + 0]; const double d1_ = dz - dx * d1; const double d2 = w[node0 * 7 + 4] / w[node0 * 7 + 3]; const double d2_ = d1_ - d2 * d0_; w[node0 * 7 + 5] += d2_ * d2_; } if(part[node1] == t) { const double d0 = w[node1 * 7 + 1] / w[node1 * 7 + 0]; const double d0_ = -dy + dx * d0; const double d1 = w[node1 * 7 + 2] / w[node1 * 7 + 0]; const double d1_ = -dz + dx * d1; const double d2 = w[node1 * 7 + 4] / w[node1 * 7 + 3]; const double d2_ = d1_ - d2 * d0_; w[node1 * 7 + 5] += d2_ * d2_; } } } } /* Do w11 and w12 */ static inline 
void w1alloc(const struct geometry *g, double *w) { const uint32_t *ie = g->s->i; const uint32_t *part = g->n->part; const uint32_t *n0 = g->e->eptr->n0; const uint32_t *n1 = g->e->eptr->n1; const double *x0 = g->n->xyz->x0; const double *x1 = g->n->xyz->x1; const double *x2 = g->n->xyz->x2; #pragma omp parallel { const uint32_t t = (uint32_t) omp_get_thread_num(); const uint32_t ie0 = ie[t]; const uint32_t ie1 = ie[t+1]; uint32_t i; for(i = ie0; i < ie1; i++) { const uint32_t node0 = n0[i]; const uint32_t node1 = n1[i]; const double coordx0 = x0[node0]; const double coordy0 = x1[node0]; const double coordz0 = x2[node0]; const double coordx1 = x0[node1]; const double coordy1 = x1[node1]; const double coordz1 = x2[node1]; /* Compute the difference of each coordinate component */ const double dx = coordx1 - coordx0; const double dy = coordy1 - coordy0; const double dz = coordz1 - coordz0; if(part[node0] == t) { const double d = w[node0 * 7 + 1] / w[node0 * 7 + 0]; const double d_ = dy - dx * d; w[node0 * 7 + 3] += d_ * d_; w[node0 * 7 + 4] += d_ * dz; } if(part[node1] == t) { const double d = w[node1 * 7 + 1] / w[node1 * 7 + 0]; const double d_ = -dy + dx * d; w[node1 * 7 + 3] += d_ * d_; w[node1 * 7 + 4] -= d_ * dz; } } } } /* Compute w00, w01, and w02 in parallel */ static inline void w0alloc(const struct geometry *g, double *w) { const uint32_t *ie = g->s->i; const uint32_t *part = g->n->part; const uint32_t *n0 = g->e->eptr->n0; const uint32_t *n1 = g->e->eptr->n1; const double *x0 = g->n->xyz->x0; const double *x1 = g->n->xyz->x1; const double *x2 = g->n->xyz->x2; #pragma omp parallel { const uint32_t t = (uint32_t) omp_get_thread_num(); const uint32_t ie0 = ie[t]; const uint32_t ie1 = ie[t+1]; uint32_t i; for(i = ie0; i < ie1; i++) { const uint32_t node0 = n0[i]; const uint32_t node1 = n1[i]; const double coordx0 = x0[node0]; const double coordy0 = x1[node0]; const double coordz0 = x2[node0]; const double coordx1 = x0[node1]; const double coordy1 = x1[node1]; const double coordz1 = x2[node1]; /* * Write-back: Update endpoints * */ if(part[node0] == t) // Do the left endpoint { const double res_x = coordx1 - coordx0; const double res_y = coordy1 - coordy0; const double res_z = coordz1 - coordz0; w[node0 * 7 + 0] += res_x * res_x; w[node0 * 7 + 1] += res_x * res_y; w[node0 * 7 + 2] += res_x * res_z; } if(part[node1] == t) // Do the right endpoint { const double res_x = coordx0 - coordx1; const double res_y = coordy0 - coordy1; const double res_z = coordz0 - coordz1; w[node1 * 7 + 0] += res_x * res_x; w[node1 * 7 + 1] += res_x * res_y; w[node1 * 7 + 2] += res_x * res_z; } } } } void wmalloc(struct geometry *g) { size_t nnodes = g->n->sz; double *w = (double *) fun3d_calloc(7 * nnodes, sizeof(double)); uint32_t i; /* Do w00, w01, and w02 */ w0alloc(g, w); /* Compute ||x|| (norm) and divide the other by the computed norm */ #pragma omp parallel for for(i = 0; i < nnodes; i++) { w[i * 7 + 0] = sqrt(w[i * 7 + 0]); w[i * 7 + 1] /= w[i * 7 + 0]; w[i * 7 + 2] /= w[i * 7 + 0]; } /* Do w11 and w12 */ w1alloc(g, w); #pragma omp parallel for for(i = 0; i < nnodes; i++) { w[i * 7 + 3] = sqrt(w[i * 7 + 3]); w[i * 7 + 4] /= w[i * 7 + 3]; } /* Do w22 */ w2alloc(g, w); /* Update the magnitudes. 
Stuffs contributed by Dinesh 1998 */ #pragma omp parallel for for(i = 0; i < nnodes; i++) { w[i * 7 + 5] = sqrt(w[i * 7 + 5]); double sw00 = w[i * 7 + 0] * w[i * 7 + 0]; double sw11 = w[i * 7 + 3] * w[i * 7 + 3]; double sw22 = w[i * 7 + 5] * w[i * 7 + 5]; double w00 = 1.f / sw00; double w11 = 1.f / sw11; double w22 = 1.f / sw22; double w01 = w[i * 7 + 1] / w[i * 7 + 0]; double w02 = w[i * 7 + 2] / w[i * 7 + 0]; double w12 = w[i * 7 + 4] / w[i * 7 + 3]; double m0 = w[i * 7 + 1] * w[i * 7 + 4]; m0 -= w[i * 7 + 2] * w[i * 7 + 3]; double m1 = w[i * 7 + 0] * w[i * 7 + 3] * sw22; double w33 = m0 / m1; w[i * 7 + 0] = w00; w[i * 7 + 3] = w11; w[i * 7 + 5] = w22; w[i * 7 + 1] = w01; w[i * 7 + 2] = w02; w[i * 7 + 4] = w12; w[i * 7 + 6] = w33; } size_t nedges = g->e->sz; struct xyz *terms0 = (struct xyz *) fun3d_malloc(1, sizeof(struct xyz)); double *wtermsx0 = (double *) fun3d_calloc(nedges, sizeof(double)); double *wtermsy0 = (double *) fun3d_calloc(nedges, sizeof(double)); double *wtermsz0 = (double *) fun3d_calloc(nedges, sizeof(double)); terms0->x0 = wtermsx0; terms0->x1 = wtermsy0; terms0->x2 = wtermsz0; struct xyz *terms1 = (struct xyz *) fun3d_malloc(1, sizeof(struct xyz)); double *wtermsx1 = (double *) fun3d_calloc(nedges, sizeof(double)); double *wtermsy1 = (double *) fun3d_calloc(nedges, sizeof(double)); double *wtermsz1 = (double *) fun3d_calloc(nedges, sizeof(double)); terms1->x0 = wtermsx1; terms1->x1 = wtermsy1; terms1->x2 = wtermsz1; compute_terms(g, w, terms0, terms1); fun3d_free(w); struct weights *weights = (struct weights *) fun3d_malloc(1, sizeof(struct weights)); weights->w0 = terms0; weights->w1 = terms1; g->e->w = weights; }
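/* Minimal sketch (not part of weights.c) of the concurrency pattern used by
   w0alloc/w1alloc/w2alloc above: each thread walks its own edge range
   ie[t]..ie[t+1], but a node's accumulator is written only when that thread
   owns the node (part[node] == t), so shared nodes are never updated by two
   threads at once and no atomics or locks are needed.  The names edge_begin,
   owner, edge_val and acc are hypothetical stand-ins for g->s->i, g->n->part,
   a per-edge contribution and the w[] array; how the edge ranges are arranged
   so that every owned endpoint is still covered is left to the mesh
   partitioning and is not shown here. */
#include <omp.h>
#include <stdint.h>
static void accumulate_per_node(const uint32_t *edge_begin, /* nthreads+1 entries */
                                const uint32_t *n0, const uint32_t *n1,
                                const uint32_t *owner, const double *edge_val,
                                double *acc)
{
    #pragma omp parallel
    {
        const uint32_t t = (uint32_t) omp_get_thread_num();
        uint32_t e;
        for (e = edge_begin[t]; e < edge_begin[t + 1]; e++) {
            /* each endpoint is touched only by the thread that owns it */
            if (owner[n0[e]] == t)
                acc[n0[e]] += edge_val[e];
            if (owner[n1[e]] == t)
                acc[n1[e]] += edge_val[e];
        }
    }
}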
broadcast_reduce-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015-2017 by Contributors * \file broadcast_reduce-inl.h * \brief CPU-specific Function definition of broadcast and reduce operators */ #ifndef MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_ #define MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_ #include <mxnet/operator_util.h> #include <algorithm> #include <vector> #include <string> #include <utility> #include "../mshadow_op.h" #include "../mxnet_op.h" #include "../operator_common.h" namespace mxnet { namespace op { namespace mxnet_op { template <int ndim, typename OP> struct binary_broadcast_kernel { /*! \brief Map function for binary_broadcast_kernel */ template <typename IType, typename DType> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape<ndim>& lstride, const Shape<ndim>& rstride, const Shape<ndim>& oshape, IType* lhs, IType* rhs, DType* out) { Shape<ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx])); } } /*! \brief Map function for binary_broadcast_kernel */ template <typename LType, typename RType, typename OType> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape<ndim>& lstride, const Shape<ndim>& rstride, const Shape<ndim>& oshape, LType* lhs, RType* rhs, OType* out) { Shape<ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx])); } } /*! 
\brief Map function for binary_broadcast_kernel */ template <typename IType, typename DType> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape<ndim>& lstride, const Shape<ndim>& rstride, const Shape<ndim>& oshape, IType lhs, IType* rhs, DType* out) { Shape<ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs, rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs, rhs[ridx])); } } /*! \brief Map function for binary_broadcast_kernel */ /* used for mixed type binary ops */ template <typename IType, typename DType, typename std::enable_if<!std::is_same<IType, DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape<ndim>& lstride, const Shape<ndim>& rstride, const Shape<ndim>& oshape, IType* lhs, DType* rhs, DType* out) { Shape<ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs[lidx], rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs[lidx], rhs[ridx])); } } /*! \brief Map function for binary_broadcast_kernel */ /* used for mixed type binary ops */ template < typename IType, typename DType, typename std::enable_if<!std::is_same<IType, DType>::value && !std::is_pointer<IType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t base, index_t length, OpReqType req, const Shape<ndim>& lstride, const Shape<ndim>& rstride, const Shape<ndim>& oshape, IType lhs, DType* rhs, DType* out) { Shape<ndim> coord = unravel(base, oshape); auto lidx = static_cast<index_t>(dot(coord, lstride)); auto ridx = static_cast<index_t>(dot(coord, rstride)); KERNEL_ASSIGN(out[base], req, OP::Map(lhs, rhs[ridx])); // starts from 1 to avoid extra inc at end of loop for (index_t i = 1; i < length; ++i) { inc(&coord, oshape, &lidx, lstride, &ridx, rstride); // When tuning, don't actually run the op, since it's not going to be tuned against // the actual op we'll eventually be using KERNEL_ASSIGN(out[base + i], req, OP::Map(lhs, rhs[ridx])); } } }; template <int req, typename OP, bool col_vec> struct csr_dns_csr_broadcast_kernel { /*! 
* \brief Map function for broadcast between csr and 1D vector * \param row global thread id/assigned row id * \param csr_data ptr to data buffer of csr matrix * \param csr_indices ptr to indices buffer of csr matrix * \param csr_indptr ptr to indptr buffer of csr matrix * \param dns ptr to data buffer of the dense vector * \param out ptr to the data buffer of the result csr matrix */ template <typename DType, typename CType, typename RType> MSHADOW_XINLINE static void Map(index_t row, const DType* csr_data, const CType* csr_indices, const RType* csr_indptr, const DType* dns, DType* out) { const nnvm::dim_t curr_row_i = csr_indptr[row]; const nnvm::dim_t next_row_i = csr_indptr[row + 1]; for (nnvm::dim_t iter = curr_row_i; iter < next_row_i; iter++) { KERNEL_ASSIGN( out[iter], req, OP::Map(csr_data[iter], (col_vec) ? dns[row] : dns[csr_indices[iter]])); } } /*! * \brief Map function for broadcast between csr and a scalar * \param i global thread id * \param csr_data ptr to data buffer of csr matrix * \param scalar_ptr ptr to data buffer of the scalar tensor, only the 0-th element is used * \param out ptr to the data buffer of output csr matrix * \param nnz number of non-zero elements in input csr matrix */ template <typename DType> MSHADOW_XINLINE static void Map(index_t i, const DType* csr_data, const DType* scalar_ptr, DType* out, const nnvm::dim_t nnz) { const DType scale = scalar_ptr[0]; if (i < nnz) { KERNEL_ASSIGN(out[i], req, OP::Map(csr_data[i], scale)); } } }; template <int req, typename OP, bool reverse = false> struct csr_dns_map_kernel { template <typename DType, typename CType, typename RType> MSHADOW_XINLINE static void Map(index_t row, const DType* csr_data, const CType* csr_indices, const RType* csr_indptr, DType* out, const nnvm::dim_t num_rows, const nnvm::dim_t num_cols) { if (row < num_rows) { const nnvm::dim_t curr_row_i = csr_indptr[row]; const nnvm::dim_t next_row_i = csr_indptr[row + 1]; for (nnvm::dim_t iter = curr_row_i; iter < next_row_i; iter++) { const nnvm::dim_t target = row * num_cols + csr_indices[iter]; KERNEL_ASSIGN( out[target], req, reverse ? 
OP::Map(out[target], csr_data[iter]) : OP::Map(csr_data[iter], out[target])); } } } }; } // namespace mxnet_op namespace broadcast { using namespace mshadow; const int MAX_DIM = 5; template <int ndim> MSHADOW_XINLINE void unravel_dot(const index_t idx, const Shape<ndim>& shape, const Shape<ndim>& stridej, const Shape<ndim>& stridek, index_t* j, index_t* k) { *j = 0; *k = 0; #pragma unroll for (index_t i = ndim - 1, idx_t = idx; i >= 0; --i) { const auto tmp = idx_t / shape[i]; const auto coord = idx_t - tmp * shape[i]; *j += coord * stridej[i]; *k += coord * stridek[i]; idx_t = tmp; } } template <int ndim> MSHADOW_XINLINE int diff(const Shape<ndim>& small, const Shape<ndim>& big, Shape<ndim>* dims, Shape<ndim>* stride) { int mdim = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { mdim += small[i] != big[i]; (*dims)[i] = (*stride)[i] = 1; } index_t s = 1; #pragma unroll for (int i = ndim - 1, j = mdim; i >= 0; --i) { if (small[i] != big[i]) { --j; (*stride)[j] = s; (*dims)[j] = big[i]; } s *= big[i]; } return mdim; } template <typename DType> MSHADOW_XINLINE void assign(DType* dst, const bool addto, const DType src) { if (addto) { *dst += src; } else { *dst = src; } } template <int ndim, typename DType, typename OP> MSHADOW_XINLINE void binary_broadcast_assign(const index_t idx, const bool addto, const DType* __restrict lhs, const DType* __restrict rhs, DType* out, const Shape<ndim>& lshape, const Shape<ndim>& rshape, const Shape<ndim>& oshape) { const Shape<ndim> coord = mxnet_op::unravel(idx, oshape); const index_t j = mxnet_op::ravel(coord, lshape); const index_t k = mxnet_op::ravel(coord, rshape); assign(&out[idx], addto, OP::Map(lhs[j], rhs[k])); } template <typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP, typename IndexOP = mxnet::op::mshadow_op::set_index_no_op<AType, index_t>> MSHADOW_XINLINE std::pair<AType, AType> seq_reduce_assign_block(size_t start, size_t len, size_t j, const DType* __restrict big, const Shape<ndim>& rshape, const Shape<ndim>& rstride) { Shape<ndim> coord; AType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = start; k < start + len; ++k) { coord = mxnet_op::unravel(k, rshape); AType temp = OP::Map(big[j + mxnet_op::dot(coord, rstride)]); if (IndexOP::do_op) IndexOP::Op(&temp, k); Reducer::Reduce(val, temp, residual); } return std::make_pair(val, residual); } template <typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP, typename IndexOP = mxnet::op::mshadow_op::set_index_no_op<AType, index_t>> MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto, const DType* __restrict big, OType* small, const Shape<ndim>& bshape, const Shape<ndim>& sshape, const Shape<ndim>& rshape, const Shape<ndim>& rstride, const bool use_omp = false) { Shape<ndim> coord = mxnet_op::unravel(idx, sshape); index_t j = mxnet_op::ravel(coord, bshape); AType val, residual; Reducer::SetInitValue(val, residual); if (!use_omp) { for (size_t k = 0; k < M; ++k) { coord = mxnet_op::unravel(k, rshape); AType temp = OP::Map(big[j + mxnet_op::dot(coord, rstride)]); // argmin/max, set IndexedNum.idx if (IndexOP::do_op) IndexOP::Op(&temp, k); Reducer::Reduce(val, temp, residual); } } else { const int thread_count = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); auto pairs = std::make_unique<std::pair<AType, AType>[]>(thread_count); #pragma omp parallel for num_threads(thread_count) for (int i = 0; i < thread_count; ++i) { pairs[i] = 
seq_reduce_assign_block<Reducer, ndim, AType, DType, OType, OP, IndexOP>( i * (M / thread_count), i < (thread_count - 1) ? (M / thread_count) : (M / thread_count) + M % thread_count, j, big, rshape, rstride); } for (int i = 0; i < thread_count; ++i) { Reducer::Merge(val, residual, pairs[i].first, pairs[i].second); } } Reducer::Finalize(val, residual); assign(&small[idx], addto, OType(val)); } namespace { // Returns the stride with which the fastest dimension is moving. // Used to detect memory access scatter. inline int fastest_stride(const TShape& small, const TShape& big, const TShape& big_stride) { const int ndim = small.ndim(); for (int i = ndim - 1; i >= 0; --i) { if (big[i] != 1) { return (small[i] == big[i]) ? 1 : big_stride[i]; } } return 1; } } // namespace template <int ndim, typename DType, typename OP> void BinaryBroadcastComputeImpl(Stream<cpu>* s, const OpReqType req, const TBlob& lhs, const TBlob& rhs, const TBlob& out) { mshadow::Shape<ndim> oshape = out.shape_.get<ndim>(); mshadow::Shape<ndim> lstride = mxnet_op::calc_stride(lhs.shape_.get<ndim>()); mshadow::Shape<ndim> rstride = mxnet_op::calc_stride(rhs.shape_.get<ndim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<ndim, OP>, cpu>::template LaunchEx( s, out.shape_.Size(), req, lstride, rstride, oshape, lhs.dptr<DType>(), rhs.dptr<DType>(), out.dptr<DType>()); } template <typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP, typename IndexOP = mxnet::op::mshadow_op::set_index_no_op<AType, index_t>> void seq_reduce_compute(const size_t N, const size_t M, const bool addto, const DType* big, OType* small, const Shape<ndim> bshape, const Shape<ndim> sshape, const Shape<ndim> rshape, const Shape<ndim> rstride) { const int thread_count = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (N >= thread_count) { #pragma omp parallel for num_threads(thread_count) for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { seq_reduce_assign<Reducer, ndim, AType, DType, OType, OP, IndexOP>( idx, M, addto, big, small, bshape, sshape, rshape, rstride, false); } } else { for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { seq_reduce_assign<Reducer, ndim, AType, DType, OType, OP, IndexOP>( idx, M, addto, big, small, bshape, sshape, rshape, rstride, true); } } } template <typename Reducer, int ndim, typename DType, typename OP> void seq_reduce_compute_extra_mem(const size_t N, const size_t M, const bool addto, const DType* big, DType* small, const Shape<ndim> bshape, const Shape<ndim> sshape, const Shape<ndim> rshape, const Shape<ndim> rstride, const index_t* ws_dptr) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { Shape<ndim> coord = mxnet_op::unravel(idx, sshape); index_t j = mxnet_op::ravel(coord, bshape); DType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { Reducer::Reduce(val, OP::Map(big[j + ws_dptr[k]]), residual); } assign(&small[idx], addto, val); } } template <typename Reducer, int ndim, typename DType, typename OP, bool safe_acc = false> void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(), M = rshape.Size(); if (!safe_acc) { seq_reduce_compute<Reducer, ndim, DType, DType, DType, OP>(N, 
M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); } else { MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, { typedef typename std::conditional<safe_acc, AType, DataType>::type AccType; MSHADOW_TYPE_SWITCH_WITH_BOOL(small.type_flag_, OType, { typedef typename std::conditional<safe_acc, OType, DataType>::type OutType; seq_reduce_compute<Reducer, ndim, AccType, DataType, OutType, OP>(N, M, req == kAddTo, big.dptr<DataType>(), small.dptr<OutType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); }); }); } } template <typename Reducer, int ndim, typename DType, typename OP> void ReduceBool(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(), M = rshape.Size(); seq_reduce_compute<Reducer, ndim, bool, DType, bool, OP>(N, M, req == kAddTo, big.dptr<DType>(), small.dptr<bool>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride); } template <typename Reducer, int ndim, typename DType, typename OP> void ReduceWithExtraMem(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big) { using namespace mxnet_op; if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); index_t* ws_dptr = reinterpret_cast<index_t*>(workspace.dptr_); size_t N = small.shape_.Size(), M = rshape.Size(); #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t k = 0; k < static_cast<index_t>(M); k++) { Shape<ndim> coord = mxnet_op::unravel(k, rshape); ws_dptr[k] = mxnet_op::dot(coord, rstride); } seq_reduce_compute_extra_mem<Reducer, ndim, DType, OP>(N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, ws_dptr); } inline size_t ReduceWorkspaceSize(Stream<cpu>* s, const mxnet::TShape& small, const OpReqType req, const mxnet::TShape& big) { return 0; } inline size_t ReduceWorkspaceSize(Stream<cpu>* s, const mxnet::TShape& small, const OpReqType req, const mxnet::TShape& big, const mxnet::TShape& lhs, const mxnet::TShape& rhs) { return 0; } #if MXNET_USE_CUDA namespace { constexpr int warpSize = 32; constexpr int unroll_reduce = 2; // Returns a/b integer division rounded up template <typename Type> Type ceil_idiv(const Type a, const Type b) { return (a + b - 1) / b; } uint64_t calc_num_load(const int X, const int Y, const int* strides) { // Number of full warps uint64_t num_full_warp = X / warpSize; // Length of the partial warp i.e. 
number of threads that are performing loads uint64_t len_part_warp = X % warpSize; uint64_t num_load_full = (std::min(warpSize, strides[0]) + std::min(warpSize, strides[1]) + std::min(warpSize, strides[2])) * num_full_warp; uint64_t num_load_part = (std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp * strides[0], warpSize)) + std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp * strides[1], warpSize)) + std::min(len_part_warp, ceil_idiv<uint64_t>(len_part_warp * strides[2], warpSize))) * (len_part_warp != 0); uint64_t num_load = (num_load_full + num_load_part) * (uint64_t)Y; return num_load; } inline int diff(const TShape& small, const TShape& big, TShape* dims, TShape* stride) { int ndim = small.ndim(); int mdim = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { mdim += small[i] != big[i]; (*dims)[i] = (*stride)[i] = 1; } index_t s = 1; #pragma unroll for (int i = ndim - 1, j = mdim; i >= 0; --i) { if (small[i] != big[i]) { --j; (*stride)[j] = s; (*dims)[j] = big[i]; } s *= big[i]; } return mdim; } constexpr int nthread_reduce = 512; constexpr index_t kBaseGridNum = 1024; } // namespace // Configuration for ReduceImpl() struct ReduceImplConfig { index_t N; index_t M; index_t Mnext; struct { dim3 blockDim; dim3 gridDim; int shMemSize; bool do_transpose; } kernel_1; struct { int blockSize; int gridSize; } kernel_2; size_t workspace_size; TShape rshape, rstride; TShape lhs_shape, lhs_stride; TShape rhs_shape, rhs_stride; inline ReduceImplConfig(const ::mxnet::TShape& small, const ::mxnet::TShape& big, const ::mxnet::TShape* lhs, const ::mxnet::TShape* rhs) : rshape(small.ndim(), 1), rstride(small.ndim(), 1), lhs_shape(small.ndim(), 1), lhs_stride(small.ndim(), 1), rhs_shape(small.ndim(), 1), rhs_stride(small.ndim(), 1) { // The largest reduction type currently is (index_t, double) struct // aligned to 16B constexpr size_t max_type_size = 2 * sizeof(double); constexpr int maxLoopPerTB = 64; int ndim = small.ndim(); diff(small, big, &rshape, &rstride); N = small.Size(); M = rshape[0]; for (int i = 1; i < ndim; ++i) { M *= rshape[i]; } bool multiOp = false; if (lhs != nullptr) { CHECK_NOTNULL(rhs); diff(small, *lhs, &lhs_shape, &lhs_stride); diff(small, *rhs, &rhs_shape, &rhs_stride); multiOp = true; } workspace_size = 0; kernel_1.shMemSize = 0; kernel_1.do_transpose = false; if (M == 1) { kernel_1.blockDim.x = nthread_reduce; kernel_1.gridDim.x = std::min( kBaseGridNum, static_cast<index_t>((N + kernel_1.blockDim.x - 1) / kernel_1.blockDim.x)); } else { int reduce_strides[3]; reduce_strides[0] = fastest_stride(small, big, big); reduce_strides[1] = (multiOp) ? fastest_stride(small, *lhs, *lhs) : 1; reduce_strides[2] = (multiOp) ? fastest_stride(small, *rhs, *rhs) : 1; int reduce_strides_transp[3]; reduce_strides_transp[0] = fastest_stride(small, rshape, rstride); reduce_strides_transp[1] = (multiOp) ? fastest_stride(small, lhs_shape, lhs_stride) : 1; reduce_strides_transp[2] = (multiOp) ? 
fastest_stride(small, rhs_shape, rhs_stride) : 1; uint64_t num_load = calc_num_load(N, M, reduce_strides); uint64_t num_load_transp = calc_num_load(M, N, reduce_strides_transp); Mnext = 1; kernel_1.do_transpose = (num_load > num_load_transp); kernel_1.blockDim.x = 0; kernel_1.blockDim.y = 0; if (kernel_1.do_transpose) { // Fastest thread ID goes through M // Loop over N has step size kernel_1.blockDim.y if (N < 8) { kernel_1.blockDim.y = 1; } else if (N < 256) { kernel_1.blockDim.y = 4; } else { if (M < 8) { kernel_1.blockDim.x = 1; } else if (M < 256) { kernel_1.blockDim.x = 4; } else { kernel_1.blockDim.x = warpSize; } } } else { // Fastest thread ID goes through N // Loop over M has step size kernel_1.blockDim.y if (M < 8) { kernel_1.blockDim.y = 1; } else if (M < 256) { kernel_1.blockDim.y = 4; } else { if (N < 8) { kernel_1.blockDim.x = 1; } else if (N < 256) { kernel_1.blockDim.x = 4; } else { kernel_1.blockDim.x = warpSize; } } } if (kernel_1.blockDim.x == 0 && kernel_1.blockDim.y == 0) { LOG(FATAL) << "Unable to set blockDim"; } else if (kernel_1.blockDim.x == 0) { kernel_1.blockDim.x = nthread_reduce / kernel_1.blockDim.y; } else if (kernel_1.blockDim.y == 0) { kernel_1.blockDim.y = nthread_reduce / kernel_1.blockDim.x; } if (kernel_1.do_transpose) { // Fastest thread ID goes through M kernel_1.gridDim.x = std::min((unsigned int)kBaseGridNum, ceil_idiv<unsigned int>(N, kernel_1.blockDim.y)); kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext); int by = kernel_1.blockDim.y; if (kernel_1.blockDim.y % warpSize == 0) { // Fix shared memory bank conflict by++; } kernel_1.shMemSize = (kernel_1.blockDim.x > 1) ? kernel_1.blockDim.x * by * max_type_size * 2 : 0; // Maximum number of times we want TB to loop in M // Max size of M-block each TB can handle int maxMblock = kernel_1.blockDim.x * maxLoopPerTB; Mnext = (M + maxMblock - 1) / maxMblock; } else { // Fastest thread ID goes through N kernel_1.gridDim.x = std::min((unsigned int)kBaseGridNum, ceil_idiv<unsigned int>(N, kernel_1.blockDim.x)); kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext); kernel_1.shMemSize = (kernel_1.blockDim.y > 1) ? 
kernel_1.blockDim.x * kernel_1.blockDim.y * max_type_size * 2 : 0; // Maximum number of times we want TB to loop in M // Max size of M-block each TB can handle int maxMblock = kernel_1.blockDim.y * maxLoopPerTB; Mnext = (M + maxMblock - 1) / maxMblock; } if (Mnext > 1) { // small_dptr[] is N*Mnext*type_size bytes workspace_size += N * Mnext * max_type_size; // Set gridDim.y to Mnext kernel_1.gridDim.y = std::min(kBaseGridNum, Mnext); } if (Mnext > 1) { kernel_2.blockSize = nthread_reduce; kernel_2.gridSize = std::min( kBaseGridNum, static_cast<index_t>((N + kernel_2.blockSize - 1) / kernel_2.blockSize)); } } } }; inline size_t ReduceWorkspaceSize(Stream<gpu>* s, const ::mxnet::TShape& small, const OpReqType req, const ::mxnet::TShape& big) { if (req == kNullOp) return 0; ReduceImplConfig config(small, big, nullptr, nullptr); return config.workspace_size; } inline size_t ReduceWorkspaceSize(Stream<gpu>* s, const ::mxnet::TShape& small, const OpReqType req, const ::mxnet::TShape& big, const ::mxnet::TShape& lhs, const ::mxnet::TShape& rhs) { if (req == kNullOp) return 0; ReduceImplConfig config(small, big, &lhs, &rhs); return config.workspace_size; } #endif // MXNET_USE_CUDA template <typename Reducer, int ndim, typename DType, typename OP1, typename OP2> MSHADOW_XINLINE void seq_reduce_assign(const index_t idx, const size_t M, const bool addto, const DType* __restrict big, const DType* __restrict lhs, const DType* __restrict rhs, DType* small, const Shape<ndim>& big_shape, const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0, const Shape<ndim>& small_shape, const Shape<ndim>& rshape, const Shape<ndim>& lhs_shape, const Shape<ndim>& rhs_shape, const Shape<ndim>& rstride, const Shape<ndim>& lhs_stride, const Shape<ndim>& rhs_stride) { Shape<ndim> coord = mxnet_op::unravel(idx, small_shape); const index_t idx_big0 = mxnet_op::ravel(coord, big_shape); const index_t idx_lhs0 = mxnet_op::ravel(coord, lhs_shape0); const index_t idx_rhs0 = mxnet_op::ravel(coord, rhs_shape0); DType val, residual; Reducer::SetInitValue(val, residual); for (size_t k = 0; k < M; ++k) { Shape<ndim> coord_big = mxnet_op::unravel(k, rshape); index_t idx_big = idx_big0 + mxnet_op::dot(coord_big, rstride); Shape<ndim> coord_lhs = mxnet_op::unravel(k, lhs_shape); index_t idx_lhs = idx_lhs0 + mxnet_op::dot(coord_lhs, lhs_stride); Shape<ndim> coord_rhs = mxnet_op::unravel(k, rhs_shape); index_t idx_rhs = idx_rhs0 + mxnet_op::dot(coord_rhs, rhs_stride); Reducer::Reduce(val, OP1::Map(big[idx_big], OP2::Map(lhs[idx_lhs], rhs[idx_rhs])), residual); } Reducer::Finalize(val, residual); assign(&small[idx], addto, val); } template <typename Reducer, int ndim, typename DType, typename OP1, typename OP2> void seq_reduce_compute(const size_t N, const size_t M, const bool addto, const DType* big, const DType* lhs, const DType* rhs, DType* small, const Shape<ndim> big_shape, const Shape<ndim> small_shape, const Shape<ndim> rshape, const Shape<ndim> rstride, const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride, const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride, const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0) { #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) { seq_reduce_assign<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small, big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, lhs_shape, rhs_shape, rstride, lhs_stride, rhs_stride); } } template <typename Reducer, int ndim, typename 
DType, typename OP1, typename OP2> void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req, const Tensor<cpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs, const TBlob& rhs) { if (req == kNullOp) return; Shape<ndim> rshape, rstride; diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride); size_t N = small.shape_.Size(); size_t M = rshape.Size(); Shape<ndim> lhs_shape, lhs_stride; diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride); Shape<ndim> rhs_shape, rhs_stride; diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride); seq_reduce_compute<Reducer, ndim, DType, OP1, OP2>(N, M, req == kAddTo, big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(), small.dptr<DType>(), big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, lhs_shape, lhs_stride, rhs_shape, rhs_stride, lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>()); } #if MXNET_USE_CUDA void RTCReduce(const OpContext& ctx, const TBlob& small, const OpReqType req, const Tensor<gpu, 1, char>& workspace, const TBlob& big, const std::string& reducer, int ndim, const std::string& OP, const bool use_index = false); void RTCReduce(const OpContext& ctx, const TBlob& small, const OpReqType req, const Tensor<gpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs, const TBlob& rhs, const std::string& reducer, int ndim, const std::string& OP1, const std::string& OP2); #endif } // namespace broadcast } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_BROADCAST_REDUCE_INL_H_
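/* Plain-C illustration (not the mshadow/mxnet implementation above) of the
   stride trick behind binary_broadcast_kernel: the output coordinate is
   recovered from the flat index ("unravel"), and each input element is found
   by a dot product of that coordinate with the input's strides, where an axis
   that is broadcast (extent 1 in that input) gets stride 0 and therefore
   always selects element 0 along that axis.  Shapes are assumed to be already
   padded to a common rank of NDIM. */
#include <stddef.h>
#define NDIM 3
static void broadcast_strides(const size_t *shape, size_t *stride) {
  size_t s = 1;
  for (int i = NDIM - 1; i >= 0; --i) {
    stride[i] = (shape[i] == 1) ? 0 : s;  /* broadcast axes contribute nothing */
    s *= shape[i];
  }
}
/* out[idx] = lhs[.] + rhs[.] with NumPy-style broadcasting over NDIM axes */
static void broadcast_add(const double *lhs, const size_t *lshape,
                          const double *rhs, const size_t *rshape,
                          double *out, const size_t *oshape) {
  size_t lstride[NDIM], rstride[NDIM], n = 1;
  broadcast_strides(lshape, lstride);
  broadcast_strides(rshape, rstride);
  for (int i = 0; i < NDIM; ++i) n *= oshape[i];
  for (size_t idx = 0; idx < n; ++idx) {
    size_t rem = idx, lidx = 0, ridx = 0;
    for (int i = NDIM - 1; i >= 0; --i) {  /* unravel and dot in one pass */
      size_t coord = rem % oshape[i];
      rem /= oshape[i];
      lidx += coord * lstride[i];
      ridx += coord * rstride[i];
    }
    out[idx] = lhs[lidx] + rhs[ridx];
  }
}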
attribute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/identify.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/magick.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/segment.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. 
% % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ typedef struct _EdgeInfo { double left, right, top, bottom; } EdgeInfo; static double GetEdgeBackgroundCensus(const Image *image, const CacheView *image_view,const GravityType gravity,const size_t width, const size_t height,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double census; Image *edge_image; PixelInfo background, pixel; RectangleInfo edge_geometry; register const Quantum *p; ssize_t y; /* Determine the percent of image background for this edge. */ switch (gravity) { case NorthWestGravity: case NorthGravity: default: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); break; } case NorthEastGravity: case EastGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); break; } case SouthEastGravity: case SouthGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); break; } case SouthWestGravity: case WestGravity: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); break; } } GetPixelInfoPixel(image,p,&background); artifact=GetImageArtifact(image,"background"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&background,exception); artifact=GetImageArtifact(image,"trim:background-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&background,exception); edge_geometry.width=width; edge_geometry.height=height; edge_geometry.x=x_offset; edge_geometry.y=y_offset; GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) return(0.0); census=0.0; edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { register ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { GetPixelInfoPixel(edge_image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse) census++; p+=GetPixelChannels(edge_image); } } census/=((double) edge_image->columns*edge_image->rows); edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); return(census); } static inline double GetMinEdgeBackgroundCensus(const EdgeInfo *edge) { double census; census=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top), edge->bottom); return(census); } static RectangleInfo GetEdgeBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double background_census, percent_background; EdgeInfo edge, vertex; Image *edge_image; RectangleInfo bounds; /* Get the image bounding box. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); SetGeometry(image,&bounds); edge_image=CloneImage(image,0,0,MagickTrue,exception); if (edge_image == (Image *) NULL) return(bounds); (void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page); (void) memset(&vertex,0,sizeof(vertex)); edge_view=AcquireVirtualCacheView(edge_image,exception); edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,WestGravity, 1,0,0,0,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,EastGravity, 1,0,0,0,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,NorthGravity, 0,1,0,0,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,SouthGravity, 0,1,0,0,exception); percent_background=1.0; artifact=GetImageArtifact(edge_image,"trim:percent-background"); if (artifact != (const char *) NULL) percent_background=StringToDouble(artifact,(char **) NULL)/100.0; percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon), 1.0); background_census=GetMinEdgeBackgroundCensus(&edge); for ( ; background_census < percent_background; background_census=GetMinEdgeBackgroundCensus(&edge)) { if ((bounds.width == 0) || (bounds.height == 0)) break; if (fabs(edge.left-background_census) < MagickEpsilon) { /* Trim left edge. */ vertex.left++; bounds.width--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.right-background_census) < MagickEpsilon) { /* Trim right edge. */ vertex.right++; bounds.width--; edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.top-background_census) < MagickEpsilon) { /* Trim top edge. */ vertex.top++; bounds.height--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); continue; } if (fabs(edge.bottom-background_census) < MagickEpsilon) { /* Trim bottom edge. 
*/ vertex.bottom++; bounds.height--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); bounds.x=(ssize_t) vertex.left; bounds.y=(ssize_t) vertex.top; if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return(bounds); } MagickExport RectangleInfo GetImageBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *image_view; const char *artifact; MagickBooleanType status; PixelInfo target[3], zero; RectangleInfo bounds; register const Quantum *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); artifact=GetImageArtifact(image,"trim:percent-background"); if (artifact != (const char *) NULL) return(GetEdgeBoundingBox(image,exception)); bounds.width=0; bounds.height=0; bounds.x=(ssize_t) image->columns; bounds.y=(ssize_t) image->rows; GetPixelInfo(image,&target[0]); image_view=AcquireVirtualCacheView(image,exception); p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); return(bounds); } GetPixelInfoPixel(image,p,&target[0]); GetPixelInfo(image,&target[1]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[1]); GetPixelInfo(image,&target[2]); p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[2]); status=MagickTrue; GetPixelInfo(image,&zero); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; RectangleInfo bounding_box; register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif bounding_box=bounds; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,p,&pixel); if ((x < bounding_box.x) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.x=x; if ((x > (ssize_t) bounding_box.width) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse)) bounding_box.width=(size_t) x; if ((y < bounding_box.y) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.y=y; if ((y > (ssize_t) bounding_box.height) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse)) bounding_box.height=(size_t) y; p+=GetPixelChannels(image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif { if (bounding_box.x < 
bounds.x) bounds.x=bounding_box.x; if (bounding_box.y < bounds.y) bounds.y=bounding_box.y; if (bounding_box.width > bounds.width) bounds.width=bounding_box.width; if (bounding_box.height > bounds.height) bounds.height=bounding_box.height; } } image_view=DestroyCacheView(image_view); if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); else { bounds.width-=(bounds.x-1); bounds.height-=(bounds.y-1); } return(bounds); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C o n v e x H u l l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageConvexHull() returns the convex hull points of an image canvas. % % The format of the GetImageConvexHull method is: % % PointInfo *GetImageConvexHull(const Image *image, % size_t number_vertices,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_vertices: the number of vertices in the convex hull. % % o exception: return any errors or warnings in this structure. % */ static double LexicographicalOrder(PointInfo *a,PointInfo *b,PointInfo *c) { /* Order by x-coordinate, and in case of a tie, by y-coordinate. */ return((b->x-a->x)*(c->y-a->y)-(b->y-a->y)*(c->x-a->x)); } static PixelInfo GetEdgeBackgroundColor(const Image *image, const CacheView *image_view,ExceptionInfo *exception) { const char *artifact; double census[4], edge_census; PixelInfo background[4], edge_background; register ssize_t i; /* Most dominant color of edges/corners is the background color of the image. */ artifact=GetImageArtifact(image,"convex-hull:background-color"); if (artifact == (const char *) NULL) artifact=GetImageArtifact(image,"background"); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i < 4; i++) { CacheView *edge_view; GravityType gravity; Image *edge_image; PixelInfo pixel; RectangleInfo edge_geometry; register const Quantum *p; ssize_t y; census[i]=0.0; (void) memset(&edge_geometry,0,sizeof(edge_geometry)); switch (i) { case 0: default: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); gravity=WestGravity; edge_geometry.width=1; edge_geometry.height=0; } case 1: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); gravity=EastGravity; edge_geometry.width=1; edge_geometry.height=0; } case 2: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); gravity=NorthGravity; edge_geometry.width=0; edge_geometry.height=1; } case 3: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); gravity=SouthGravity; edge_geometry.width=0; edge_geometry.height=1; } } GetPixelInfoPixel(image,p,background+i); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,background+i, exception); GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) continue; edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { register ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1, exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { GetPixelInfoPixel(edge_image,p,&pixel); if 
(IsFuzzyEquivalencePixelInfo(&pixel,background+i) == MagickFalse) census[i]++; p+=GetPixelChannels(edge_image); } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); } edge_census=(-1.0); for (i=0; i < 4; i++) if (census[i] > edge_census) { edge_background=background[i]; edge_census=census[i]; } return(edge_background); } void TraceConvexHull(PointInfo *vertices,size_t number_vertices, PointInfo ***monotone_chain,size_t *chain_length) { PointInfo **chain; register ssize_t i; size_t demark, n; /* Construct the upper and lower hulls: rightmost to leftmost counterclockwise. */ chain=(*monotone_chain); n=0; for (i=0; i < (ssize_t) number_vertices; i++) { while ((n >= 2) && (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0)) n--; chain[n++]=(&vertices[i]); } demark=n+1; for (i=(ssize_t) number_vertices-2; i >= 0; i--) { while ((n >= demark) && (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0)) n--; chain[n++]=(&vertices[i]); } *chain_length=n; } MagickExport PointInfo *GetImageConvexHull(const Image *image, size_t *number_vertices,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; MemoryInfo *vertices_info; PixelInfo background; PointInfo *convex_hull, **monotone_chain, *vertices; size_t n; ssize_t y; /* Identify convex hull vertices of image foreground object(s). */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *number_vertices=0; vertices_info=AcquireVirtualMemory(image->columns,image->rows* sizeof(*vertices)); monotone_chain=(PointInfo **) AcquireQuantumMemory(2*image->columns,2* image->rows*sizeof(*monotone_chain)); if ((vertices_info == (MemoryInfo *) NULL) || (monotone_chain == (PointInfo **) NULL)) { if (monotone_chain != (PointInfo **) NULL) monotone_chain=(PointInfo **) RelinquishMagickMemory(monotone_chain); if (vertices_info != (MemoryInfo *) NULL) vertices_info=RelinquishVirtualMemory(vertices_info); return((PointInfo *) NULL); } vertices=(PointInfo *) GetVirtualMemoryBlob(vertices_info); image_view=AcquireVirtualCacheView(image,exception); background=GetEdgeBackgroundColor(image,image_view,exception); status=MagickTrue; n=0; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { PixelInfo pixel; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse) { vertices[n].x=(double) x; vertices[n].y=(double) y; n++; } p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Return the convex hull of the image foreground object(s). 
*/ TraceConvexHull(vertices,n,&monotone_chain,number_vertices); convex_hull=(PointInfo *) AcquireQuantumMemory(*number_vertices, sizeof(*convex_hull)); if (convex_hull != (PointInfo *) NULL) for (n=0; n < *number_vertices; n++) convex_hull[n]=(*monotone_chain[n]); monotone_chain=(PointInfo **) RelinquishMagickMemory(monotone_chain); vertices_info=RelinquishVirtualMemory(vertices_info); return(convex_hull); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDepth() returns the depth of a particular image channel. % % The format of the GetImageDepth method is: % % size_t GetImageDepth(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t *current_depth, depth, number_threads; ssize_t y; /* Compute image depth. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); current_depth=(size_t *) AcquireQuantumMemory(number_threads, sizeof(*current_depth)); if (current_depth == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); status=MagickTrue; for (i=0; i < (ssize_t) number_threads; i++) current_depth[i]=1; if ((image->storage_class == PseudoClass) && (image->alpha_trait == UndefinedPixelTrait)) { for (i=0; i < (ssize_t) image->colors; i++) { const int id = GetOpenMPThreadId(); while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && (GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse)) break; current_depth[id]++; } } depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } image_view=AcquireVirtualCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) if ((1UL*QuantumRange) <= MaxMap) { size_t *depth_map; /* Scale pixels to desired (optimized with depth map). 
*/ depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) { unsigned int depth; for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++) { Quantum pixel; QuantumAny range; range=GetQuantumRange(depth); pixel=(Quantum) i; if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range)) break; } depth_map[i]=depth; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(p[i])]; } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; depth_map=(size_t *) RelinquishMagickMemory(depth_map); current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } #endif /* Compute pixel depth. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { QuantumAny range; range=GetQuantumRange(current_depth[id]); if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range)) break; current_depth[id]++; } } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M i n i m u m B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMinimumBoundingBox() returns the points that form the minimum % bounding box around the image foreground objects with the "Rotating % Calipers" algorithm. 
The method also returns these properties: % minimum-bounding-box:area, minimum-bounding-box:width, % minimum-bounding-box:height, and minimum-bounding-box:angle. % % The format of the GetImageMinimumBoundingBox method is: % % PointInfo *GetImageMinimumBoundingBox(Image *image, % size_t number_vertices,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_vertices: the number of vertices in the bounding box. % % o exception: return any errors or warnings in this structure. % */ typedef struct _CaliperInfo { double area, width, height, projection; ssize_t p, q, v; } CaliperInfo; static inline double getAngle(PointInfo *p,PointInfo *q) { /* Get the angle between line (p,q) and horizontal axis, in degrees. */ return(RadiansToDegrees(atan2(q->y-p->y,q->x-p->x))); } static inline double getDistance(PointInfo *p,PointInfo *q) { double distance; distance=hypot(p->x-q->x,p->y-q->y); return(distance*distance); } static inline double getProjection(PointInfo *p,PointInfo *q,PointInfo *v) { double distance; /* Projection of vector (x,y) - p into a line passing through p and q. */ distance=getDistance(p,q); if (distance < MagickEpsilon) return(INFINITY); return((q->x-p->x)*(v->x-p->x)+(v->y-p->y)*(q->y-p->y))/sqrt(distance); } static inline double getFeretDiameter(PointInfo *p,PointInfo *q,PointInfo *v) { double distance; /* Distance from a point (x,y) to a line passing through p and q. */ distance=getDistance(p,q); if (distance < MagickEpsilon) return(INFINITY); return((q->x-p->x)*(v->y-p->y)-(v->x-p->x)*(q->y-p->y))/sqrt(distance); } MagickExport PointInfo *GetImageMinimumBoundingBox(Image *image, size_t *number_vertices,ExceptionInfo *exception) { CaliperInfo caliper_info; const char *artifact; double angle, diameter, distance; PointInfo *bounding_box, *vertices; register ssize_t i; size_t number_hull_vertices; /* Generate the minimum bounding box with the "Rotating Calipers" algorithm. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *number_vertices=0; vertices=GetImageConvexHull(image,&number_hull_vertices,exception); if (vertices == (PointInfo *) NULL) return((PointInfo *) NULL); *number_vertices=4; bounding_box=(PointInfo *) AcquireQuantumMemory(*number_vertices, sizeof(*bounding_box)); if (bounding_box == (PointInfo *) NULL) { vertices=(PointInfo *) RelinquishMagickMemory(vertices); return((PointInfo *) NULL); } caliper_info.area=2.0*image->columns*image->rows; caliper_info.width=(double) image->columns+image->rows; caliper_info.height=0.0; caliper_info.projection=0.0; caliper_info.p=(-1); caliper_info.q=(-1); caliper_info.v=(-1); for (i=0; i < (ssize_t) number_hull_vertices; i++) { double area = 0.0, max_projection = 0.0, min_diameter = -1.0, min_projection = 0.0; register ssize_t j, k; ssize_t p = -1, q = -1, v = -1; for (j=0; j < (ssize_t) number_hull_vertices; j++) { double diameter; diameter=fabs(getFeretDiameter(&vertices[i], &vertices[(i+1) % number_hull_vertices],&vertices[j])); if (min_diameter < diameter) { min_diameter=diameter; p=i; q=(i+1) % number_hull_vertices; v=j; } } for (k=0; k < (ssize_t) number_hull_vertices; k++) { double projection; /* Rotating calipers. 
*/ projection=getProjection(&vertices[p],&vertices[q],&vertices[k]); min_projection=MagickMin(min_projection,projection); max_projection=MagickMax(max_projection,projection); } area=min_diameter*(max_projection-min_projection); if (caliper_info.area > area) { caliper_info.area=area; caliper_info.width=min_diameter; caliper_info.height=max_projection-min_projection; caliper_info.projection=max_projection; caliper_info.p=p; caliper_info.q=q; caliper_info.v=v; } } /* Initialize minimum bounding box. */ diameter=getFeretDiameter(&vertices[caliper_info.p], &vertices[caliper_info.q],&vertices[caliper_info.v]); angle=atan2(vertices[caliper_info.q].y-vertices[caliper_info.p].y, vertices[caliper_info.q].x-vertices[caliper_info.p].x); bounding_box[0].x=vertices[caliper_info.p].x+cos(angle)* caliper_info.projection; bounding_box[0].y=vertices[caliper_info.p].y+sin(angle)* caliper_info.projection; bounding_box[1].x=floor(bounding_box[0].x+cos(angle+MagickPI/2.0)*diameter+ 0.5); bounding_box[1].y=floor(bounding_box[0].y+sin(angle+MagickPI/2.0)*diameter+ 0.5); bounding_box[2].x=floor(bounding_box[1].x+cos(angle)*(-caliper_info.height)+ 0.5); bounding_box[2].y=floor(bounding_box[1].y+sin(angle)*(-caliper_info.height)+ 0.5); bounding_box[3].x=floor(bounding_box[2].x+cos(angle+MagickPI/2.0)*(-diameter)+ 0.5); bounding_box[3].y=floor(bounding_box[2].y+sin(angle+MagickPI/2.0)*(-diameter)+ 0.5); /* Export minimum bounding box properties. */ (void) FormatImageProperty(image,"minimum-bounding-box:area","%.*g", GetMagickPrecision(),caliper_info.area); (void) FormatImageProperty(image,"minimum-bounding-box:width","%.*g", GetMagickPrecision(),caliper_info.width); (void) FormatImageProperty(image,"minimum-bounding-box:height","%.*g", GetMagickPrecision(),caliper_info.height); (void) FormatImageProperty(image,"minimum-bounding-box:_p","%.*g,%.*g", GetMagickPrecision(),vertices[caliper_info.p].x, GetMagickPrecision(),vertices[caliper_info.p].y); (void) FormatImageProperty(image,"minimum-bounding-box:_q","%.*g,%.*g", GetMagickPrecision(),vertices[caliper_info.q].x, GetMagickPrecision(),vertices[caliper_info.q].y); (void) FormatImageProperty(image,"minimum-bounding-box:_v","%.*g,%.*g", GetMagickPrecision(),vertices[caliper_info.v].x, GetMagickPrecision(),vertices[caliper_info.v].y); /* Find smallest angle to origin. */ distance=hypot(bounding_box[0].x,bounding_box[0].y); angle=getAngle(&bounding_box[0],&bounding_box[1]); for (i=1; i < 4; i++) { double d = hypot(bounding_box[i].x,bounding_box[i].y); if (d < distance) { distance=d; angle=getAngle(&bounding_box[i],&bounding_box[(i+1) % 4]); } } artifact=GetImageArtifact(image,"minimum-bounding-box:orientation"); if (artifact != (const char *) NULL) { double length, q_length, p_length; PointInfo delta, point; /* Find smallest perpendicular distance from edge to origin. 
*/ point=bounding_box[0]; for (i=1; i < 4; i++) { if (bounding_box[i].x < point.x) point.x=bounding_box[i].x; if (bounding_box[i].y < point.y) point.y=bounding_box[i].y; } for (i=0; i < 4; i++) { bounding_box[i].x-=point.x; bounding_box[i].y-=point.y; } for (i=0; i < 4; i++) { double d, intercept, slope; delta.x=bounding_box[(i+1) % 4].x-bounding_box[i].x; delta.y=bounding_box[(i+1) % 4].y-bounding_box[i].y; slope=delta.y*PerceptibleReciprocal(delta.x); intercept=bounding_box[(i+1) % 4].y-slope*bounding_box[i].x; d=fabs((slope*bounding_box[i].x-bounding_box[i].y+intercept)* PerceptibleReciprocal(sqrt(slope*slope+1.0))); if ((i == 0) || (d < distance)) { distance=d; point=delta; } } angle=RadiansToDegrees(atan(point.y*PerceptibleReciprocal(point.x))); length=hypot(point.x,point.y); p_length=fabs((double) MagickMax(caliper_info.width,caliper_info.height)- length); q_length=fabs(length-(double) MagickMin(caliper_info.width, caliper_info.height)); if (LocaleCompare(artifact,"landscape") == 0) { if (p_length > q_length) angle+=(angle < 0.0) ? 90.0 : -90.0; } else if (LocaleCompare(artifact,"portrait") == 0) { if (p_length < q_length) angle+=(angle >= 0.0) ? 90.0 : -90.0; } } (void) FormatImageProperty(image,"minimum-bounding-box:angle","%.*g", GetMagickPrecision(),angle); (void) FormatImageProperty(image,"minimum-bounding-box:unrotate","%.*g", GetMagickPrecision(),-angle); vertices=(PointInfo *) RelinquishMagickMemory(vertices); return(bounding_box); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t u m D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantumDepth() returns the depth of the image rounded to a legal % quantum depth: 8, 16, or 32. % % The format of the GetImageQuantumDepth method is: % % size_t GetImageQuantumDepth(const Image *image, % const MagickBooleanType constrain) % % A description of each parameter follows: % % o image: the image. % % o constrain: A value other than MagickFalse, constrains the depth to % a maximum of MAGICKCORE_QUANTUM_DEPTH. % */ MagickExport size_t GetImageQuantumDepth(const Image *image, const MagickBooleanType constrain) { size_t depth; depth=image->depth; if (depth <= 8) depth=8; else if (depth <= 16) depth=16; else if (depth <= 32) depth=32; else if (depth <= 64) depth=64; if (constrain != MagickFalse) depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageType() returns the type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % The format of the GetImageType method is: % % ImageType GetImageType(const Image *image) % % A description of each parameter follows: % % o image: the image. 
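%
%  For example (an illustrative sketch; `image' is assumed to have been
%  obtained elsewhere, e.g. with ReadImage()):
%
%    if (GetImageType(image) == BilevelType)
%      (void) fprintf(stdout,"bi-level image\n");
%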
% */ MagickExport ImageType GetImageType(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->colorspace == CMYKColorspace) { if (image->alpha_trait == UndefinedPixelTrait) return(ColorSeparationType); return(ColorSeparationAlphaType); } if (IsImageMonochrome(image) != MagickFalse) return(BilevelType); if (IsImageGray(image) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(GrayscaleAlphaType); return(GrayscaleType); } if (IsPaletteImage(image) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(PaletteAlphaType); return(PaletteType); } if (image->alpha_trait != UndefinedPixelTrait) return(TrueColorAlphaType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageGray() returns grayscale if all the pixels in the image have % the same red, green, and blue intensities, and bi-level is the intensity is % either 0 or QuantumRange. Otherwise undefined is returned. % % The format of the IdentifyImageGray method is: % % ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ImageType IdentifyImageGray(const Image *image, ExceptionInfo *exception) { CacheView *image_view; ImageType type; register const Quantum *p; register ssize_t x; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->type == BilevelType) || (image->type == GrayscaleType) || (image->type == GrayscaleAlphaType)) return(image->type); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(UndefinedType); type=BilevelType; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsPixelGray(image,p) == MagickFalse) { type=UndefinedType; break; } if ((type == BilevelType) && (IsPixelMonochrome(image,p) == MagickFalse)) type=GrayscaleType; p+=GetPixelChannels(image); } if (type == UndefinedType) break; } image_view=DestroyCacheView(image_view); if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait)) type=GrayscaleAlphaType; return(type); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image % have the same red, green, and blue intensities and the intensity is either % 0 or QuantumRange. % % The format of the IdentifyImageMonochrome method is: % % MagickBooleanType IdentifyImageMonochrome(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
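%
%  For example (an illustrative sketch; `image' and `exception' are assumed
%  to exist already):
%
%    if (IdentifyImageMonochrome(image,exception) != MagickFalse)
%      (void) fprintf(stdout,"every pixel is either black or white\n");
%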
% */ MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType bilevel; register ssize_t x; register const Quantum *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->type == BilevelType) return(MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(MagickFalse); bilevel=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsPixelMonochrome(image,p) == MagickFalse) { bilevel=MagickFalse; break; } p+=GetPixelChannels(image); } if (bilevel == MagickFalse) break; } image_view=DestroyCacheView(image_view); return(bilevel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageType() returns the potential type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % To ensure the image type matches its potential, use SetImageType(): % % (void) SetImageType(image,IdentifyImageType(image,exception),exception); % % The format of the IdentifyImageType method is: % % ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ImageType IdentifyImageType(const Image *image, ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == CMYKColorspace) { if (image->alpha_trait == UndefinedPixelTrait) return(ColorSeparationType); return(ColorSeparationAlphaType); } if (IdentifyImageMonochrome(image,exception) != MagickFalse) return(BilevelType); if (IdentifyImageGray(image,exception) != UndefinedType) { if (image->alpha_trait != UndefinedPixelTrait) return(GrayscaleAlphaType); return(GrayscaleType); } if (IdentifyPaletteImage(image,exception) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(PaletteAlphaType); return(PaletteType); } if (image->alpha_trait != UndefinedPixelTrait) return(TrueColorAlphaType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageGray() returns MagickTrue if the type of the image is grayscale or % bi-level. % % The format of the IsImageGray method is: % % MagickBooleanType IsImageGray(const Image *image) % % A description of each parameter follows: % % o image: the image. 
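%
%  For example (an illustrative sketch; `image' is assumed to exist already;
%  note this inspects image->type only and does not scan pixels):
%
%    if (IsImageGray(image) != MagickFalse)
%      (void) fprintf(stdout,"type is grayscale or bi-level\n");
%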
% */ MagickExport MagickBooleanType IsImageGray(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if ((image->type == BilevelType) || (image->type == GrayscaleType) || (image->type == GrayscaleAlphaType)) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageMonochrome() returns MagickTrue if type of the image is bi-level. % % The format of the IsImageMonochrome method is: % % MagickBooleanType IsImageMonochrome(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType IsImageMonochrome(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type == BilevelType) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e O p a q u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageOpaque() returns MagickTrue if none of the pixels in the image have % an alpha value other than OpaqueAlpha (QuantumRange). % % Will return true immediatally is alpha channel is not available. % % The format of the IsImageOpaque method is: % % MagickBooleanType IsImageOpaque(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsImageOpaque(const Image *image, ExceptionInfo *exception) { CacheView *image_view; register const Quantum *p; register ssize_t x; ssize_t y; /* Determine if image is opaque. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelAlpha(image,p) != OpaqueAlpha) break; p+=GetPixelChannels(image); } if (x < (ssize_t) image->columns) break; } image_view=DestroyCacheView(image_view); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageDepth() sets the depth of the image. % % The format of the SetImageDepth method is: % % MagickBooleanType SetImageDepth(Image *image,const size_t depth, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o depth: the image depth. % % o exception: return any errors or warnings in this structure. 
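%
%  For example, to reduce the significant bits per channel to 8 (an
%  illustrative sketch; `image' and `exception' are assumed to exist
%  already):
%
%    (void) SetImageDepth(image,8,exception);
%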
% */ MagickExport MagickBooleanType SetImageDepth(Image *image, const size_t depth,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; QuantumAny range; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (depth >= MAGICKCORE_QUANTUM_DEPTH) { image->depth=depth; return(MagickTrue); } range=GetQuantumRange(depth); if (image->storage_class == PseudoClass) { register ssize_t i; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].red),range),range); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].green),range),range); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].blue),range),range); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].alpha),range),range); } } status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) if ((1UL*QuantumRange) <= MaxMap) { Quantum *depth_map; register ssize_t i; /* Scale pixels to desired (optimized with depth map). */ depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (Quantum *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range), range); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=depth_map[ScaleQuantumToMap(q[i])]; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); depth_map=(Quantum *) RelinquishMagickMemory(depth_map); if (status != MagickFalse) image->depth=depth; return(status); } #endif /* Scale pixels to desired depth. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType) q[i]),range),range); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); if (status != MagickFalse) image->depth=depth; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageType() sets the type of image. Choose from these types: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % OptimizeType % % The format of the SetImageType method is: % % MagickBooleanType SetImageType(Image *image,const ImageType type, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: Image type. % % o exception: return any errors or warnings in this structure. 
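%
%  For example (an illustrative sketch; `image' and `exception' are assumed
%  to exist already):
%
%    (void) SetImageType(image,GrayscaleType,exception);
%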
% */ MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type, ExceptionInfo *exception) { const char *artifact; ImageInfo *image_info; MagickBooleanType status; QuantizeInfo *quantize_info; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); status=MagickTrue; image_info=AcquireImageInfo(); image_info->dither=image->dither; artifact=GetImageArtifact(image,"dither"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"dither",artifact); switch (type) { case BilevelType: { status=TransformImageColorspace(image,GRAYColorspace,exception); (void) NormalizeImage(image,exception); quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=2; quantize_info->colorspace=GRAYColorspace; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); image->alpha_trait=UndefinedPixelTrait; break; } case GrayscaleType: { status=TransformImageColorspace(image,GRAYColorspace,exception); image->alpha_trait=UndefinedPixelTrait; break; } case GrayscaleAlphaType: { status=TransformImageColorspace(image,GRAYColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case PaletteType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if ((image->storage_class == DirectClass) || (image->colors > 256)) { quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=256; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); } image->alpha_trait=UndefinedPixelTrait; break; } case PaletteBilevelAlphaType: { ChannelType channel_mask; status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); channel_mask=SetImageChannelMask(image,AlphaChannel); (void) BilevelImage(image,(double) QuantumRange/2.0,exception); (void) SetImageChannelMask(image,channel_mask); quantize_info=AcquireQuantizeInfo(image_info); status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case PaletteAlphaType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); quantize_info=AcquireQuantizeInfo(image_info); quantize_info->colorspace=TransparentColorspace; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case TrueColorType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); image->alpha_trait=UndefinedPixelTrait; break; } case TrueColorAlphaType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case ColorSeparationType: { status=TransformImageColorspace(image,CMYKColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); image->alpha_trait=UndefinedPixelTrait; break; } case ColorSeparationAlphaType: { 
status=TransformImageColorspace(image,CMYKColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); if (image->alpha_trait == UndefinedPixelTrait) status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case OptimizeType: case UndefinedType: break; } image_info=DestroyImageInfo(image_info); if (status == MagickFalse) return(status); image->type=type; return(MagickTrue); }
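/*
  Illustrative usage sketch (not part of the original source): a minimal
  program that reads an image and reports its bounding box and type using
  the methods defined above.  It is guarded by the hypothetical macro
  MAGICKCORE_ATTRIBUTE_USAGE_SKETCH so it never affects a normal build.
*/
#if defined(MAGICKCORE_ATTRIBUTE_USAGE_SKETCH)
#include <stdio.h>
#include <stdlib.h>
#include <MagickCore/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  RectangleInfo
    bounds;

  if (argc < 2)
    {
      (void) fprintf(stderr,"usage: %s image\n",argv[0]);
      return(1);
    }
  MagickCoreGenesis(*argv,MagickTrue);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,argv[1],MagickPathExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      bounds=GetImageBoundingBox(image,exception);
      (void) fprintf(stdout,"bounding box: %gx%g%+g%+g, type: %d\n",
        (double) bounds.width,(double) bounds.height,(double) bounds.x,
        (double) bounds.y,(int) GetImageType(image));
      image=DestroyImage(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
#endif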
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 16; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + 
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
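/*
 * Illustrative sketch (not part of the original benchmark): the same
 * double-buffered, second-order-in-time update as the 25-point kernel
 * above, reduced to a 1D 3-point stencil so the A[(t+1)%2] / A[t%2]
 * buffer swap is easy to follow.  Guarded by a hypothetical macro so it
 * does not affect the build; the array size here is arbitrary.
 */
#ifdef STENCIL_SKETCH_1D
static void leapfrog_1d(double A[2][16], const double roc2[16], int Nt)
{
  int t, k;

  for (t = 0; t < Nt; t++) {
    for (k = 1; k < 15; k++) {
      /* A(t+1) = 2*A(t) - A(t-1) + roc2 * Laplacian(A(t));
         A[(t+1)%2][k] still holds A(t-1) when it is read on the RHS. */
      A[(t+1)%2][k] = 2.0*A[t%2][k] - A[(t+1)%2][k]
        + roc2[k]*(A[t%2][k-1] - 2.0*A[t%2][k] + A[t%2][k+1]);
    }
  }
}
#endif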
fdtd2d.c
/** * fdtd2d.c: This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <[email protected]> * Rafael Cardoso F Sousa <[email protected]> * Luís Felipe Mattos <[email protected]> */ #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <unistd.h> #ifdef _OPENMP #include <omp.h> #endif #include "BenchmarksUtil.h" // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 10.05 /* Problem size. */ #ifdef RUN_TEST #define SIZE 1100 #elif RUN_BENCHMARK #define SIZE 9600 #else #define SIZE 1000 #endif #define tmax 500 #define NX SIZE #define NY SIZE /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz) { int i, j; for (i = 0; i < tmax; i++) { _fict_[i] = (DATA_TYPE)i; } for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { ex[i * NY + j] = ((DATA_TYPE)i * (j + 1) + 1) / NX; ey[i * NY + j] = ((DATA_TYPE)(i - 1) * (j + 2) + 2) / NX; hz[i * NY + j] = ((DATA_TYPE)(i - 9) * (j + 4) + 3) / NX; } } } void init_array_hz(DATA_TYPE *hz) { int i, j; for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { hz[i * NY + j] = ((DATA_TYPE)(i - 9) * (j + 4) + 3) / NX; } } } int compareResults(DATA_TYPE *hz1, DATA_TYPE *hz2) { int i, j, fail; fail = 0; for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { if (percentDiff(hz1[i * NY + j], hz2[i * NY + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); return fail; } void runFdtd(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz) { int t, i, j; for (t = 0; t < tmax; t++) { for (j = 0; j < NY; j++) { ey[0 * NY + j] = _fict_[t]; } for (i = 1; i < NX; i++) { for (j = 0; j < NY; j++) { ey[i * NY + j] = ey[i * NY + j] - 0.5 * (hz[i * NY + j] - hz[(i - 1) * NY + j]); } } for (i = 0; i < NX; i++) { for (j = 1; j < NY; j++) { ex[i * (NY + 1) + j] = ex[i * (NY + 1) + j] - 0.5 * (hz[i * NY + j] - hz[i * NY + (j - 1)]); } } for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { hz[i * NY + j] = hz[i * NY + j] - 0.7 * (ex[i * (NY + 1) + (j + 1)] - ex[i * (NY + 1) + j] + ey[(i + 1) * NY + j] - ey[i * NY + j]); } } } } void runFdtd_OMP(DATA_TYPE *_fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz) { int t, i, j; #pragma omp target data map(to : _fict_[ : tmax], ex[ : (NX *(NY + 1))], ey[ : ((NX + 1) * NY)]) map(tofrom : hz[ : (NX *(NY + 1))]) device(DEVICE_ID) { for (t = 0; t < tmax; t++) { #pragma omp target teams distribute parallel for device(DEVICE_ID) for (j = 0; j < NY; j++) { ey[0 * NY + j] = _fict_[t]; } #pragma omp target teams distribute parallel for collapse(2) device(DEVICE_ID) for (i = 1; i < NX; i++) { for (j = 0; j < NY; j++) { ey[i * NY + j] = ey[i * NY + j] - 0.5 * (hz[i * NY + j] - hz[(i - 1) * NY + j]); } } #pragma omp target teams distribute parallel for collapse(2) device(DEVICE_ID) for (i = 0; i < NX; i++) { for (j = 1; j < NY; j++) { ex[i * (NY + 1) + j] = ex[i * (NY + 1) + j] - 0.5 * (hz[i * NY + j] - hz[i * NY + (j - 1)]); } } #pragma omp target teams distribute parallel for collapse(2) device(DEVICE_ID) for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { hz[i * NY + j] = hz[i * NY + j] - 0.7 * (ex[i * (NY + 1) + (j + 1)] - ex[i * (NY + 1) + j] + ey[(i + 
1) * NY + j] - ey[i * NY + j]); } } } } } int main() { double t_start, t_end; int fail = 0; DATA_TYPE *_fict_; DATA_TYPE *ex; DATA_TYPE *ey; DATA_TYPE *hz; DATA_TYPE *hz_outputFromGpu; _fict_ = (DATA_TYPE *)malloc(tmax * sizeof(DATA_TYPE)); ex = (DATA_TYPE *)malloc(NX * (NY + 1) * sizeof(DATA_TYPE)); ey = (DATA_TYPE *)malloc((NX + 1) * NY * sizeof(DATA_TYPE)); hz = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE)); hz_outputFromGpu = (DATA_TYPE *)malloc(NX * NY * sizeof(DATA_TYPE)); fprintf(stdout, "<< 2-D Finite Different Time Domain Kernel >>\n"); init_arrays(_fict_, ex, ey, hz); init_array_hz(hz_outputFromGpu); t_start = rtclock(); runFdtd_OMP(_fict_, ex, ey, hz_outputFromGpu); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); #ifdef RUN_TEST t_start = rtclock(); runFdtd(_fict_, ex, ey, hz); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); fail = compareResults(hz, hz_outputFromGpu); #endif free(_fict_); free(ex); free(ey); free(hz); free(hz_outputFromGpu); return fail; }
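/*
 * Illustrative sketch (not part of the original benchmark): the minimal
 * OpenMP 4.x offload pattern used by runFdtd_OMP above -- a `target data'
 * region keeps an array resident on the device while several
 * `target teams distribute parallel for' kernels run over it.  Guarded by
 * a hypothetical macro; the loop body and iteration count are placeholders,
 * and DEVICE_ID is assumed to come from BenchmarksUtil.h as in the code
 * above.
 */
#ifdef OFFLOAD_PATTERN_SKETCH
void scale_on_device(float *a, int n, float s) {
  int t, i;
#pragma omp target data map(tofrom : a[ : n]) device(DEVICE_ID)
  {
    for (t = 0; t < 4; t++) {
#pragma omp target teams distribute parallel for device(DEVICE_ID)
      for (i = 0; i < n; i++) {
        a[i] = s * a[i];
      }
    }
  }
}
#endif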
libsais.c
/*-- This file is a part of libsais, a library for linear time suffix array and burrows wheeler transform construction. Copyright (c) 2021 Ilya Grebnov <[email protected]> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Please see the file LICENSE for full copyright information. --*/ #include "libsais_internal.h" #include "libsais.h" #include <stddef.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <limits.h> #if defined(_OPENMP) #include <omp.h> #else #define UNUSED(_x) (void)(_x) #endif typedef int32_t sa_sint_t; typedef uint32_t sa_uint_t; typedef ptrdiff_t fast_sint_t; typedef size_t fast_uint_t; #define SAINT_BIT (32) #define SAINT_MAX INT32_MAX #define SAINT_MIN INT32_MIN #define ALPHABET_SIZE (1 << CHAR_BIT) #define UNBWT_FASTBITS (17) #define SUFFIX_GROUP_BIT (SAINT_BIT - 1) #define SUFFIX_GROUP_MARKER (((sa_sint_t)1) << (SUFFIX_GROUP_BIT - 1)) #define BUCKETS_INDEX2(_c, _s) (((_c) << 1) + (_s)) #define BUCKETS_INDEX4(_c, _s) (((_c) << 2) + (_s)) #define LIBSAIS_PER_THREAD_CACHE_SIZE (24576) typedef struct LIBSAIS_THREAD_CACHE { sa_sint_t symbol; sa_sint_t index; } LIBSAIS_THREAD_CACHE; typedef union LIBSAIS_THREAD_STATE { struct { fast_sint_t position; fast_sint_t count; fast_sint_t m; fast_sint_t last_lms_suffix; sa_sint_t * buckets; LIBSAIS_THREAD_CACHE * cache; } state; uint8_t padding[64]; } LIBSAIS_THREAD_STATE; typedef struct LIBSAIS_CONTEXT { sa_sint_t * buckets; LIBSAIS_THREAD_STATE * thread_state; fast_sint_t threads; } LIBSAIS_CONTEXT; typedef struct LIBSAIS_UNBWT_CONTEXT { sa_uint_t * bucket2; uint16_t * fastbits; sa_uint_t * buckets; fast_sint_t threads; } LIBSAIS_UNBWT_CONTEXT; #if defined(__GNUC__) || defined(__clang__) #define RESTRICT __restrict__ #elif defined(_MSC_VER) || defined(__INTEL_COMPILER) #define RESTRICT __restrict #else #error Your compiler, configuration or platform is not supported. #endif #if defined(__has_builtin) #if __has_builtin(__builtin_prefetch) #define HAS_BUILTIN_PREFECTCH #endif #elif defined(__GNUC__) && __GNUC__ > 3 #define HAS_BUILTIN_PREFECTCH #endif #if defined(HAS_BUILTIN_PREFECTCH) #define libsais_prefetch(address) __builtin_prefetch((const void *)(address), 0, 0) #define libsais_prefetchw(address) __builtin_prefetch((const void *)(address), 1, 0) #elif defined (_M_IX86) || defined (_M_AMD64) #include <intrin.h> #define libsais_prefetch(address) _mm_prefetch((const void *)(address), _MM_HINT_NTA) #define libsais_prefetchw(address) _m_prefetchw((const void *)(address)) #elif defined (_M_ARM) #include <intrin.h> #define libsais_prefetch(address) __prefetch((const void *)(address)) #define libsais_prefetchw(address) __prefetchw((const void *)(address)) #elif defined (_M_ARM64) #include <intrin.h> #define libsais_prefetch(address) __prefetch2((const void *)(address), 1) #define libsais_prefetchw(address) __prefetch2((const void *)(address), 17) #else #error Your compiler, configuration or platform is not supported. 
#endif

#if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
    #if defined(_LITTLE_ENDIAN) \
            || (defined(BYTE_ORDER) && defined(LITTLE_ENDIAN) && BYTE_ORDER == LITTLE_ENDIAN) \
            || (defined(_BYTE_ORDER) && defined(_LITTLE_ENDIAN) && _BYTE_ORDER == _LITTLE_ENDIAN) \
            || (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && __BYTE_ORDER == __LITTLE_ENDIAN) \
            || (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
        #define __LITTLE_ENDIAN__
    #elif defined(_BIG_ENDIAN) \
            || (defined(BYTE_ORDER) && defined(BIG_ENDIAN) && BYTE_ORDER == BIG_ENDIAN) \
            || (defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && _BYTE_ORDER == _BIG_ENDIAN) \
            || (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && __BYTE_ORDER == __BIG_ENDIAN) \
            || (defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
        #define __BIG_ENDIAN__
    #elif defined(_WIN32)
        #define __LITTLE_ENDIAN__
    #endif
#endif

#if defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
    #if defined(__GNUC__) || defined(__clang__)
        #define libsais_bswap16(x) (__builtin_bswap16(x))
    #elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
        #define libsais_bswap16(x) (_byteswap_ushort(x))
    #else
        #define libsais_bswap16(x) ((uint16_t)(x >> 8) | (uint16_t)(x << 8))
    #endif
#elif !defined(__LITTLE_ENDIAN__) && defined(__BIG_ENDIAN__)
    #define libsais_bswap16(x) (x)
#else
    #error Your compiler, configuration or platform is not supported.
#endif

static void * libsais_align_up(const void * address, size_t alignment)
{
    return (void *)((((ptrdiff_t)address) + ((ptrdiff_t)alignment) - 1) & (-((ptrdiff_t)alignment)));
}

static void * libsais_alloc_aligned(size_t size, size_t alignment)
{
    void * address = malloc(size + sizeof(short) + alignment - 1);
    if (address != NULL)
    {
        void * aligned_address = libsais_align_up((void *)((ptrdiff_t)address + (ptrdiff_t)(sizeof(short))), alignment);
        ((short *)aligned_address)[-1] = (short)((ptrdiff_t)aligned_address - (ptrdiff_t)address);

        return aligned_address;
    }

    return NULL;
}

static void libsais_free_aligned(void * aligned_address)
{
    if (aligned_address != NULL)
    {
        free((void *)((ptrdiff_t)aligned_address - ((short *)aligned_address)[-1]));
    }
}

static LIBSAIS_THREAD_STATE * libsais_alloc_thread_state(sa_sint_t threads)
{
    LIBSAIS_THREAD_STATE * RESTRICT thread_state = (LIBSAIS_THREAD_STATE *)libsais_alloc_aligned((size_t)threads * sizeof(LIBSAIS_THREAD_STATE), 4096);
    sa_sint_t * RESTRICT thread_buckets = (sa_sint_t *)libsais_alloc_aligned((size_t)threads * 4 * ALPHABET_SIZE * sizeof(sa_sint_t), 4096);
    LIBSAIS_THREAD_CACHE * RESTRICT thread_cache = (LIBSAIS_THREAD_CACHE *)libsais_alloc_aligned((size_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE * sizeof(LIBSAIS_THREAD_CACHE), 4096);

    if (thread_state != NULL && thread_buckets != NULL && thread_cache != NULL)
    {
        fast_sint_t t;
        for (t = 0; t < threads; ++t)
        {
            thread_state[t].state.buckets = thread_buckets;
            thread_buckets += 4 * ALPHABET_SIZE;

            thread_state[t].state.cache = thread_cache;
            thread_cache += LIBSAIS_PER_THREAD_CACHE_SIZE;
        }

        return thread_state;
    }

    libsais_free_aligned(thread_cache);
    libsais_free_aligned(thread_buckets);
    libsais_free_aligned(thread_state);
    return NULL;
}

static void libsais_free_thread_state(LIBSAIS_THREAD_STATE * thread_state)
{
    if (thread_state != NULL)
    {
        libsais_free_aligned(thread_state[0].state.cache);
        libsais_free_aligned(thread_state[0].state.buckets);
        libsais_free_aligned(thread_state);
    }
}

static LIBSAIS_CONTEXT * libsais_create_ctx_main(sa_sint_t threads)
{
    LIBSAIS_CONTEXT * RESTRICT
ctx = (LIBSAIS_CONTEXT *)libsais_alloc_aligned(sizeof(LIBSAIS_CONTEXT), 64); sa_sint_t * RESTRICT buckets = (sa_sint_t *)libsais_alloc_aligned(8 * ALPHABET_SIZE * sizeof(sa_sint_t), 4096); LIBSAIS_THREAD_STATE * RESTRICT thread_state = threads > 1 ? libsais_alloc_thread_state(threads) : NULL; if (ctx != NULL && buckets != NULL && (thread_state != NULL || threads == 1)) { ctx->buckets = buckets; ctx->threads = threads; ctx->thread_state = thread_state; return ctx; } libsais_free_thread_state(thread_state); libsais_free_aligned(buckets); libsais_free_aligned(ctx); return NULL; } static void libsais_free_ctx_main(LIBSAIS_CONTEXT * ctx) { if (ctx != NULL) { libsais_free_thread_state(ctx->thread_state); libsais_free_aligned(ctx->buckets); libsais_free_aligned(ctx); } } #if defined(_OPENMP) static sa_sint_t libsais_count_negative_marked_suffixes(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { sa_sint_t count = 0; fast_sint_t i; for (i = omp_block_start; i < omp_block_start + omp_block_size; ++i) { count += (SA[i] < 0); } return count; } static sa_sint_t libsais_count_zero_marked_suffixes(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { sa_sint_t count = 0; fast_sint_t i; for (i = omp_block_start; i < omp_block_start + omp_block_size; ++i) { count += (SA[i] == 0); } return count; } static void libsais_place_cached_suffixes(sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais_prefetch(&cache[i + 2 * prefetch_distance]); libsais_prefetchw(&SA[cache[i + prefetch_distance + 0].symbol]); libsais_prefetchw(&SA[cache[i + prefetch_distance + 1].symbol]); libsais_prefetchw(&SA[cache[i + prefetch_distance + 2].symbol]); libsais_prefetchw(&SA[cache[i + prefetch_distance + 3].symbol]); SA[cache[i + 0].symbol] = cache[i + 0].index; SA[cache[i + 1].symbol] = cache[i + 1].index; SA[cache[i + 2].symbol] = cache[i + 2].index; SA[cache[i + 3].symbol] = cache[i + 3].index; } for (j += prefetch_distance + 3; i < j; i += 1) { SA[cache[i].symbol] = cache[i].index; } } static void libsais_compact_and_place_cached_suffixes(sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j, l; for (i = omp_block_start, j = omp_block_start + omp_block_size - 3, l = omp_block_start; i < j; i += 4) { libsais_prefetchw(&cache[i + prefetch_distance]); cache[l] = cache[i + 0]; l += cache[l].symbol >= 0; cache[l] = cache[i + 1]; l += cache[l].symbol >= 0; cache[l] = cache[i + 2]; l += cache[l].symbol >= 0; cache[l] = cache[i + 3]; l += cache[l].symbol >= 0; } for (j += 3; i < j; i += 1) { cache[l] = cache[i]; l += cache[l].symbol >= 0; } libsais_place_cached_suffixes(SA, cache, omp_block_start, l - omp_block_start); } static void libsais_accumulate_counts_s32_2(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s]; } } static void libsais_accumulate_counts_s32_3(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT 
bucket02 = bucket01 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s]; } } static void libsais_accumulate_counts_s32_4(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s]; } } static void libsais_accumulate_counts_s32_5(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s]; } } static void libsais_accumulate_counts_s32_6(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride; sa_sint_t * RESTRICT bucket05 = bucket04 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s] + bucket05[s]; } } static void libsais_accumulate_counts_s32_7(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride; sa_sint_t * RESTRICT bucket05 = bucket04 - bucket_stride; sa_sint_t * RESTRICT bucket06 = bucket05 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s] + bucket05[s] + bucket06[s]; } } static void libsais_accumulate_counts_s32_8(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride; sa_sint_t * RESTRICT bucket05 = bucket04 - bucket_stride; sa_sint_t * RESTRICT bucket06 = bucket05 - bucket_stride; sa_sint_t * RESTRICT bucket07 = bucket06 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s] + bucket05[s] + bucket06[s] + bucket07[s]; } } static void libsais_accumulate_counts_s32_9(sa_sint_t * RESTRICT bucket00, fast_sint_t bucket_size, fast_sint_t bucket_stride) { sa_sint_t * RESTRICT bucket01 = bucket00 - bucket_stride; sa_sint_t * RESTRICT bucket02 = bucket01 - bucket_stride; sa_sint_t * RESTRICT bucket03 = bucket02 - bucket_stride; sa_sint_t * RESTRICT bucket04 = bucket03 - bucket_stride; sa_sint_t * RESTRICT bucket05 = bucket04 - bucket_stride; sa_sint_t * RESTRICT bucket06 = bucket05 - bucket_stride; sa_sint_t * RESTRICT bucket07 = bucket06 - bucket_stride; 
sa_sint_t * RESTRICT bucket08 = bucket07 - bucket_stride; fast_sint_t s; for (s = 0; s < bucket_size; s += 1) { bucket00[s] = bucket00[s] + bucket01[s] + bucket02[s] + bucket03[s] + bucket04[s] + bucket05[s] + bucket06[s] + bucket07[s] + bucket08[s]; } } static void libsais_accumulate_counts_s32(sa_sint_t * RESTRICT buckets, fast_sint_t bucket_size, fast_sint_t bucket_stride, fast_sint_t num_buckets) { while (num_buckets >= 9) { libsais_accumulate_counts_s32_9(buckets - (num_buckets - 9) * bucket_stride, bucket_size, bucket_stride); num_buckets -= 8; } switch (num_buckets) { case 1: break; case 2: libsais_accumulate_counts_s32_2(buckets, bucket_size, bucket_stride); break; case 3: libsais_accumulate_counts_s32_3(buckets, bucket_size, bucket_stride); break; case 4: libsais_accumulate_counts_s32_4(buckets, bucket_size, bucket_stride); break; case 5: libsais_accumulate_counts_s32_5(buckets, bucket_size, bucket_stride); break; case 6: libsais_accumulate_counts_s32_6(buckets, bucket_size, bucket_stride); break; case 7: libsais_accumulate_counts_s32_7(buckets, bucket_size, bucket_stride); break; case 8: libsais_accumulate_counts_s32_8(buckets, bucket_size, bucket_stride); break; } } #endif static void libsais_gather_lms_suffixes_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, fast_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { if (omp_block_size > 0) { const fast_sint_t prefetch_distance = 128; fast_sint_t i, j = omp_block_start + omp_block_size, c0 = T[omp_block_start + omp_block_size - 1], c1 = -1; while (j < n && (c1 = T[j]) == c0) { ++j; } fast_uint_t s = c0 >= c1; for (i = omp_block_start + omp_block_size - 2, j = omp_block_start + 3; i >= j; i -= 4) { libsais_prefetch(&T[i - prefetch_distance]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1); c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1); c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1); } for (j -= 3; i >= j; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); } SA[m] = (sa_sint_t)(i + 1); } } static void libsais_gather_lms_suffixes_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536 && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { libsais_gather_lms_suffixes_8u(T, SA, n, (fast_sint_t)n - 1, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { fast_sint_t t, m = 0; for (t = omp_num_threads - 1; t > omp_thread_num; --t) { m += thread_state[t].state.m; } libsais_gather_lms_suffixes_8u(T, SA, n, (fast_sint_t)n - 1 - m, omp_block_start, omp_block_size); #pragma omp barrier if (thread_state[omp_thread_num].state.m > 0) { SA[(fast_sint_t)n - 1 - m] = (sa_sint_t)thread_state[omp_thread_num].state.last_lms_suffix; } } #endif } } static sa_sint_t libsais_gather_lms_suffixes_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n) { const fast_sint_t prefetch_distance = 32; sa_sint_t i = n - 2; sa_sint_t m = n - 1; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0; for (; i >= 3; i -= 4) { libsais_prefetch(&T[i - prefetch_distance]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((s & 3) == 1); c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 0; m -= ((s & 3) == 1); c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i - 1; m -= ((s & 3) == 1); c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 2; m -= ((s & 3) == 1); } for (; i >= 0; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((s & 3) == 1); } return n - 1 - m; } static sa_sint_t libsais_gather_compacted_lms_suffixes_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n) { const fast_sint_t prefetch_distance = 32; sa_sint_t i = n - 2; sa_sint_t m = n - 1; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0; for (; i >= 3; i -= 4) { libsais_prefetch(&T[i - prefetch_distance]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((fast_sint_t)(s & 3) == (c0 >= 0)); c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 0; m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = i - 1; m -= ((fast_sint_t)(s & 3) == (c0 >= 0)); c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i - 2; m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); } for (; i >= 0; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = i + 1; m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); } return n - 1 - m; } #if defined(_OPENMP) static void libsais_count_lms_suffixes_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, 4 * (size_t)k * sizeof(sa_sint_t)); sa_sint_t i = n - 2; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0; for (; i >= prefetch_distance + 3; i -= 4) { libsais_prefetch(&T[i - 2 * prefetch_distance]); libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 0], 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 1], 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 2], 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 3], 0)]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++; c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > 
(c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++; c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; } for (; i >= 0; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; } buckets[BUCKETS_INDEX4((fast_uint_t)c0, (s << 1) & 3)]++; } #endif static void libsais_count_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t)); sa_sint_t i = n - 2; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0; for (; i >= prefetch_distance + 3; i -= 4) { libsais_prefetch(&T[i - 2 * prefetch_distance]); libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0], 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1], 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2], 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3], 0)]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; } for (; i >= 0; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; } buckets[BUCKETS_INDEX2((fast_uint_t)c0, 0)]++; } #if defined(_OPENMP) static void libsais_count_compacted_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t)); sa_sint_t i = n - 2; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0; for (; i >= prefetch_distance + 3; i -= 4) { libsais_prefetch(&T[i - 2 * prefetch_distance]); libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0] & SAINT_MAX, 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1] & SAINT_MAX, 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2] & SAINT_MAX, 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3] & SAINT_MAX, 0)]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; } for (; i >= 0; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + 
(fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; } c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, 0)]++; } #endif static sa_sint_t libsais_count_and_gather_lms_suffixes_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t m = omp_block_start + omp_block_size - 1; if (omp_block_size > 0) { const fast_sint_t prefetch_distance = 128; fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1; while (j < n && (c1 = T[j]) == c0) { ++j; } fast_uint_t s = c0 >= c1; for (i = m - 1, j = omp_block_start + 3; i >= j; i -= 4) { libsais_prefetch(&T[i - prefetch_distance]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++; c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++; c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; } for (j -= 3; i >= j; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; } c1 = (i >= 0) ? T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++; } return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m); } static sa_sint_t libsais_count_and_gather_lms_suffixes_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t m = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536 && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { m = libsais_count_and_gather_lms_suffixes_8u(T, SA, n, buckets, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size; thread_state[omp_thread_num].state.m = libsais_count_and_gather_lms_suffixes_8u(T, SA, n, thread_state[omp_thread_num].state.buckets, omp_block_start, omp_block_size); if (thread_state[omp_thread_num].state.m > 0) { thread_state[omp_thread_num].state.last_lms_suffix = SA[thread_state[omp_thread_num].state.position - 1]; } } #pragma omp barrier #pragma omp master { memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t t; for (t = omp_num_threads - 1; t >= 0; --t) { m += (sa_sint_t)thread_state[t].state.m; if (t != omp_num_threads - 1 && thread_state[t].state.m > 0) { memcpy(&SA[n - m], &SA[thread_state[t].state.position - thread_state[t].state.m], (size_t)thread_state[t].state.m * sizeof(sa_sint_t)); } { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t s; for (s = 0; s < 4 * ALPHABET_SIZE; s += 1) { sa_sint_t A = buckets[s], B = temp_bucket[s]; buckets[s] = A + B; temp_bucket[s] = A; } } } } } #endif } return m; } static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { memset(buckets, 0, 4 * (size_t)k * sizeof(sa_sint_t)); fast_sint_t m = omp_block_start + omp_block_size - 1; if (omp_block_size > 0) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1; while (j < n && (c1 = T[j]) == c0) { ++j; } fast_uint_t s = c0 >= c1; for (i = m - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4) { libsais_prefetch(&T[i - 2 * prefetch_distance]); libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 0], 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 1], 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 2], 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX4(T[i - prefetch_distance - 3], 0)]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++; c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++; c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; } for (j -= prefetch_distance + 3; i >= j; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]++; } c1 = (i >= 0) ? 
T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX4((fast_uint_t)c0, s & 3)]++; } return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m); } static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t)); fast_sint_t m = omp_block_start + omp_block_size - 1; if (omp_block_size > 0) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1; while (j < n && (c1 = T[j]) == c0) { ++j; } fast_uint_t s = c0 >= c1; for (i = m - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4) { libsais_prefetch(&T[i - 2 * prefetch_distance]); libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0], 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1], 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2], 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3], 0)]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; } for (j -= prefetch_distance + 3; i >= j; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; } c1 = (i >= 0) ? 
T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((s & 3) == 1); buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; } return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m); } static sa_sint_t libsais_count_and_gather_compacted_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t)); fast_sint_t m = omp_block_start + omp_block_size - 1; if (omp_block_size > 0) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j = m + 1, c0 = T[m], c1 = -1; while (j < n && (c1 = T[j]) == c0) { ++j; } fast_uint_t s = c0 >= c1; for (i = m - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4) { libsais_prefetch(&T[i - 2 * prefetch_distance]); libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 0] & SAINT_MAX, 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 1] & SAINT_MAX, 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 2] & SAINT_MAX, 0)]); libsais_prefetchw(&buckets[BUCKETS_INDEX2(T[i - prefetch_distance - 3] & SAINT_MAX, 0)]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((fast_sint_t)(s & 3) == (c0 >= 0)); c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 0); m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 1); m -= ((fast_sint_t)(s & 3) == (c0 >= 0)); c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i - 2); m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; } for (j -= prefetch_distance + 3; i >= j; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((fast_sint_t)(s & 3) == (c1 >= 0)); c1 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c1, (s & 3) == 1)]++; } c1 = (i >= 0) ? 
T[i] : -1; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); SA[m] = (sa_sint_t)(i + 1); m -= ((fast_sint_t)(s & 3) == (c0 >= 0)); c0 &= SAINT_MAX; buckets[BUCKETS_INDEX2((fast_uint_t)c0, (s & 3) == 1)]++; } return (sa_sint_t)(omp_block_start + omp_block_size - 1 - m); } #if defined(_OPENMP) static fast_sint_t libsais_get_bucket_stride(fast_sint_t free_space, fast_sint_t bucket_size, fast_sint_t num_buckets) { fast_sint_t bucket_size_1024 = (bucket_size + 1023) & (-1024); if (free_space / (num_buckets - 1) >= bucket_size_1024) { return bucket_size_1024; } fast_sint_t bucket_size_16 = (bucket_size + 15) & (-16); if (free_space / (num_buckets - 1) >= bucket_size_16) { return bucket_size_16; } return bucket_size; } static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_4k_fs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t m = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { m = libsais_count_and_gather_lms_suffixes_32s_4k(T, SA, n, k, buckets, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { fast_sint_t bucket_size = 4 * (fast_sint_t)k; fast_sint_t bucket_stride = libsais_get_bucket_stride(buckets - &SA[n], bucket_size, omp_num_threads); { thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size; thread_state[omp_thread_num].state.count = libsais_count_and_gather_lms_suffixes_32s_4k(T, SA, n, k, buckets - (omp_thread_num * bucket_stride), omp_block_start, omp_block_size); } #pragma omp barrier if (omp_thread_num == omp_num_threads - 1) { fast_sint_t t; for (t = omp_num_threads - 1; t >= 0; --t) { m += (sa_sint_t)thread_state[t].state.count; if (t != omp_num_threads - 1 && thread_state[t].state.count > 0) { memcpy(&SA[n - m], &SA[thread_state[t].state.position - thread_state[t].state.count], (size_t)thread_state[t].state.count * sizeof(sa_sint_t)); } } } else { omp_num_threads = omp_num_threads - 1; omp_block_stride = (bucket_size / omp_num_threads) & (-16); omp_block_start = omp_thread_num * omp_block_stride; omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : bucket_size - omp_block_start; libsais_accumulate_counts_s32(buckets + omp_block_start, omp_block_size, bucket_stride, omp_num_threads + 1); } } #endif } return m; } static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_2k_fs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t m = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { m = libsais_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { fast_sint_t bucket_size = 2 * (fast_sint_t)k; fast_sint_t bucket_stride = libsais_get_bucket_stride(buckets - &SA[n], bucket_size, omp_num_threads); { thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size; thread_state[omp_thread_num].state.count = libsais_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets - (omp_thread_num * bucket_stride), omp_block_start, omp_block_size); } #pragma omp barrier if (omp_thread_num == omp_num_threads - 1) { fast_sint_t t; for (t = omp_num_threads - 1; t >= 0; --t) { m += (sa_sint_t)thread_state[t].state.count; if (t != omp_num_threads - 1 && thread_state[t].state.count > 0) { memcpy(&SA[n - m], &SA[thread_state[t].state.position - thread_state[t].state.count], (size_t)thread_state[t].state.count * sizeof(sa_sint_t)); } } } else { omp_num_threads = omp_num_threads - 1; omp_block_stride = (bucket_size / omp_num_threads) & (-16); omp_block_start = omp_thread_num * omp_block_stride; omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : bucket_size - omp_block_start; libsais_accumulate_counts_s32(buckets + omp_block_start, omp_block_size, bucket_stride, omp_num_threads + 1); } } #endif } return m; } static void libsais_count_and_gather_compacted_lms_suffixes_32s_2k_fs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { libsais_count_and_gather_compacted_lms_suffixes_32s_2k(T, SA, n, k, buckets, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { fast_sint_t bucket_size = 2 * (fast_sint_t)k; fast_sint_t bucket_stride = libsais_get_bucket_stride(buckets - &SA[n + n], bucket_size, omp_num_threads); { thread_state[omp_thread_num].state.position = omp_block_start + omp_block_size; thread_state[omp_thread_num].state.count = libsais_count_and_gather_compacted_lms_suffixes_32s_2k(T, SA + n, n, k, buckets - (omp_thread_num * bucket_stride), omp_block_start, omp_block_size); } #pragma omp barrier { fast_sint_t t, m = 0; for (t = omp_num_threads - 1; t >= omp_thread_num; --t) { m += (sa_sint_t)thread_state[t].state.count; } if (thread_state[omp_thread_num].state.count > 0) { memcpy(&SA[n - m], &SA[n + thread_state[omp_thread_num].state.position - thread_state[omp_thread_num].state.count], (size_t)thread_state[omp_thread_num].state.count * sizeof(sa_sint_t)); } } { omp_block_stride = (bucket_size / omp_num_threads) & (-16); omp_block_start = omp_thread_num * omp_block_stride; omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : bucket_size - omp_block_start; libsais_accumulate_counts_s32(buckets + omp_block_start, omp_block_size, bucket_stride, omp_num_threads); } } #endif } } #endif static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_4k_nofs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads) { sa_sint_t m = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(2) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); fast_sint_t omp_num_threads = 1; #endif if (omp_num_threads == 1) { m = libsais_count_and_gather_lms_suffixes_32s_4k(T, SA, n, k, buckets, 0, n); } #if defined(_OPENMP) else if (omp_thread_num == 0) { libsais_count_lms_suffixes_32s_4k(T, n, k, buckets); } else { m = libsais_gather_lms_suffixes_32s(T, SA, n); } #endif } return m; } static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_2k_nofs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads) { sa_sint_t m = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(2) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); fast_sint_t omp_num_threads = 1; #endif if (omp_num_threads == 1) { m = libsais_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets, 0, n); } #if defined(_OPENMP) else if (omp_thread_num == 0) { libsais_count_lms_suffixes_32s_2k(T, n, k, buckets); } else { m = libsais_gather_lms_suffixes_32s(T, SA, n); } #endif } return m; } static sa_sint_t libsais_count_and_gather_compacted_lms_suffixes_32s_2k_nofs_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads) { sa_sint_t m = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(2) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); fast_sint_t omp_num_threads = 1; #endif if (omp_num_threads == 1) { m = 
libsais_count_and_gather_compacted_lms_suffixes_32s_2k(T, SA, n, k, buckets, 0, n); } #if defined(_OPENMP) else if (omp_thread_num == 0) { libsais_count_compacted_lms_suffixes_32s_2k(T, n, k, buckets); } else { m = libsais_gather_compacted_lms_suffixes_32s(T, SA, n); } #endif } return m; } static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t m; #if defined(_OPENMP) sa_sint_t max_threads = (sa_sint_t)((buckets - &SA[n]) / ((4 * (fast_sint_t)k + 15) & (-16))); if (max_threads > threads) { max_threads = threads; } if (max_threads > 1 && n >= 65536 && n / k >= 2) { if (max_threads > n / 16 / k) { max_threads = n / 16 / k; } m = libsais_count_and_gather_lms_suffixes_32s_4k_fs_omp(T, SA, n, k, buckets, max_threads > 2 ? max_threads : 2, thread_state); } else #else UNUSED(thread_state); #endif { m = libsais_count_and_gather_lms_suffixes_32s_4k_nofs_omp(T, SA, n, k, buckets, threads); } return m; } static sa_sint_t libsais_count_and_gather_lms_suffixes_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t m; #if defined(_OPENMP) sa_sint_t max_threads = (sa_sint_t)((buckets - &SA[n]) / ((2 * (fast_sint_t)k + 15) & (-16))); if (max_threads > threads) { max_threads = threads; } if (max_threads > 1 && n >= 65536 && n / k >= 2) { if (max_threads > n / 8 / k) { max_threads = n / 8 / k; } m = libsais_count_and_gather_lms_suffixes_32s_2k_fs_omp(T, SA, n, k, buckets, max_threads > 2 ? max_threads : 2, thread_state); } else #else UNUSED(thread_state); #endif { m = libsais_count_and_gather_lms_suffixes_32s_2k_nofs_omp(T, SA, n, k, buckets, threads); } return m; } static void libsais_count_and_gather_compacted_lms_suffixes_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) sa_sint_t max_threads = (sa_sint_t)((buckets - &SA[n + n]) / ((2 * (fast_sint_t)k + 15) & (-16))); if (max_threads > threads) { max_threads = threads; } if (max_threads > 1 && n >= 65536 && n / k >= 2) { if (max_threads > n / 8 / k) { max_threads = n / 8 / k; } libsais_count_and_gather_compacted_lms_suffixes_32s_2k_fs_omp(T, SA, n, k, buckets, max_threads > 2 ? 
max_threads : 2, thread_state); } else #else UNUSED(thread_state); #endif { libsais_count_and_gather_compacted_lms_suffixes_32s_2k_nofs_omp(T, SA, n, k, buckets, threads); } } static void libsais_count_suffixes_32s(const sa_sint_t * RESTRICT T, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, (size_t)k * sizeof(sa_sint_t)); fast_sint_t i, j; for (i = 0, j = (fast_sint_t)n - 7; i < j; i += 8) { libsais_prefetch(&T[i + prefetch_distance]); buckets[T[i + 0]]++; buckets[T[i + 1]]++; buckets[T[i + 2]]++; buckets[T[i + 3]]++; buckets[T[i + 4]]++; buckets[T[i + 5]]++; buckets[T[i + 6]]++; buckets[T[i + 7]]++; } for (j += 7; i < j; i += 1) { buckets[T[i]]++; } } static void libsais_initialize_buckets_start_and_end_8u(sa_sint_t * RESTRICT buckets, sa_sint_t * RESTRICT freq) { sa_sint_t * RESTRICT bucket_start = &buckets[6 * ALPHABET_SIZE]; sa_sint_t * RESTRICT bucket_end = &buckets[7 * ALPHABET_SIZE]; if (freq != NULL) { fast_sint_t i, j; sa_sint_t sum = 0; for (i = BUCKETS_INDEX4(0, 0), j = 0; i <= BUCKETS_INDEX4(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX4(1, 0), j += 1) { bucket_start[j] = sum; sum += (freq[j] = buckets[i + BUCKETS_INDEX4(0, 0)] + buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 2)] + buckets[i + BUCKETS_INDEX4(0, 3)]); bucket_end[j] = sum; } } else { fast_sint_t i, j; sa_sint_t sum = 0; for (i = BUCKETS_INDEX4(0, 0), j = 0; i <= BUCKETS_INDEX4(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX4(1, 0), j += 1) { bucket_start[j] = sum; sum += buckets[i + BUCKETS_INDEX4(0, 0)] + buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 2)] + buckets[i + BUCKETS_INDEX4(0, 3)]; bucket_end[j] = sum; } } } static void libsais_initialize_buckets_start_and_end_32s_6k(sa_sint_t k, sa_sint_t * RESTRICT buckets) { sa_sint_t * RESTRICT bucket_start = &buckets[4 * k]; sa_sint_t * RESTRICT bucket_end = &buckets[5 * k]; fast_sint_t i, j; sa_sint_t sum = 0; for (i = BUCKETS_INDEX4(0, 0), j = 0; i <= BUCKETS_INDEX4((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX4(1, 0), j += 1) { bucket_start[j] = sum; sum += buckets[i + BUCKETS_INDEX4(0, 0)] + buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 2)] + buckets[i + BUCKETS_INDEX4(0, 3)]; bucket_end[j] = sum; } } static void libsais_initialize_buckets_start_and_end_32s_4k(sa_sint_t k, sa_sint_t * RESTRICT buckets) { sa_sint_t * RESTRICT bucket_start = &buckets[2 * k]; sa_sint_t * RESTRICT bucket_end = &buckets[3 * k]; fast_sint_t i, j; sa_sint_t sum = 0; for (i = BUCKETS_INDEX2(0, 0), j = 0; i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0), j += 1) { bucket_start[j] = sum; sum += buckets[i + BUCKETS_INDEX2(0, 0)] + buckets[i + BUCKETS_INDEX2(0, 1)]; bucket_end[j] = sum; } } static void libsais_initialize_buckets_end_32s_2k(sa_sint_t k, sa_sint_t * RESTRICT buckets) { fast_sint_t i; sa_sint_t sum0 = 0; for (i = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0)) { sum0 += buckets[i + BUCKETS_INDEX2(0, 0)] + buckets[i + BUCKETS_INDEX2(0, 1)]; buckets[i + BUCKETS_INDEX2(0, 0)] = sum0; } } static void libsais_initialize_buckets_start_and_end_32s_2k(sa_sint_t k, sa_sint_t * RESTRICT buckets) { fast_sint_t i, j; for (i = BUCKETS_INDEX2(0, 0), j = 0; i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0), j += 1) { buckets[j] = buckets[i]; } buckets[k] = 0; memcpy(&buckets[k + 1], buckets, ((size_t)k - 1) * sizeof(sa_sint_t)); } static void 
libsais_initialize_buckets_start_32s_1k(sa_sint_t k, sa_sint_t * RESTRICT buckets) { fast_sint_t i; sa_sint_t sum = 0; for (i = 0; i <= (fast_sint_t)k - 1; i += 1) { sa_sint_t tmp = buckets[i]; buckets[i] = sum; sum += tmp; } } static void libsais_initialize_buckets_end_32s_1k(sa_sint_t k, sa_sint_t * RESTRICT buckets) { fast_sint_t i; sa_sint_t sum = 0; for (i = 0; i <= (fast_sint_t)k - 1; i += 1) { sum += buckets[i]; buckets[i] = sum; } } static sa_sint_t libsais_initialize_buckets_for_lms_suffixes_radix_sort_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix) { { fast_uint_t s = 0; fast_sint_t c0 = T[first_lms_suffix]; fast_sint_t c1 = 0; for (; --first_lms_suffix >= 0; ) { c1 = c0; c0 = T[first_lms_suffix]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]--; } buckets[BUCKETS_INDEX4((fast_uint_t)c0, (s << 1) & 3)]--; } { sa_sint_t * RESTRICT temp_bucket = &buckets[4 * ALPHABET_SIZE]; fast_sint_t i, j; sa_sint_t sum = 0; for (i = BUCKETS_INDEX4(0, 0), j = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX4(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0)) { temp_bucket[j + BUCKETS_INDEX2(0, 1)] = sum; sum += buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 3)]; temp_bucket[j] = sum; } return sum; } } static void libsais_initialize_buckets_for_lms_suffixes_radix_sort_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix) { buckets[BUCKETS_INDEX2(T[first_lms_suffix], 0)]++; buckets[BUCKETS_INDEX2(T[first_lms_suffix], 1)]--; fast_sint_t i; sa_sint_t sum0 = 0, sum1 = 0; for (i = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0)) { sum0 += buckets[i + BUCKETS_INDEX2(0, 0)] + buckets[i + BUCKETS_INDEX2(0, 1)]; sum1 += buckets[i + BUCKETS_INDEX2(0, 1)]; buckets[i + BUCKETS_INDEX2(0, 0)] = sum0; buckets[i + BUCKETS_INDEX2(0, 1)] = sum1; } } static sa_sint_t libsais_initialize_buckets_for_lms_suffixes_radix_sort_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix) { { fast_uint_t s = 0; fast_sint_t c0 = T[first_lms_suffix]; fast_sint_t c1 = 0; for (; --first_lms_suffix >= 0; ) { c1 = c0; c0 = T[first_lms_suffix]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); buckets[BUCKETS_INDEX4((fast_uint_t)c1, s & 3)]--; } buckets[BUCKETS_INDEX4((fast_uint_t)c0, (s << 1) & 3)]--; } { sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k]; fast_sint_t i, j; sa_sint_t sum = 0; for (i = BUCKETS_INDEX4(0, 0), j = 0; i <= BUCKETS_INDEX4((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX4(1, 0), j += 1) { sum += buckets[i + BUCKETS_INDEX4(0, 1)] + buckets[i + BUCKETS_INDEX4(0, 3)]; temp_bucket[j] = sum; } return sum; } } static void libsais_initialize_buckets_for_radix_and_partial_sorting_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix) { sa_sint_t * RESTRICT bucket_start = &buckets[2 * k]; sa_sint_t * RESTRICT bucket_end = &buckets[3 * k]; buckets[BUCKETS_INDEX2(T[first_lms_suffix], 0)]++; buckets[BUCKETS_INDEX2(T[first_lms_suffix], 1)]--; fast_sint_t i, j; sa_sint_t sum0 = 0, sum1 = 0; for (i = BUCKETS_INDEX2(0, 0), j = 0; i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0), j += 1) { bucket_start[j] = sum1; sum0 += buckets[i + BUCKETS_INDEX2(0, 1)]; sum1 += buckets[i + BUCKETS_INDEX2(0, 0)] + buckets[i + BUCKETS_INDEX2(0, 1)]; buckets[i + 
BUCKETS_INDEX2(0, 1)] = sum0; bucket_end[j] = sum1; } } static void libsais_radix_sort_lms_suffixes_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4) { libsais_prefetch(&SA[i - 2 * prefetch_distance]); libsais_prefetch(&T[SA[i - prefetch_distance - 0]]); libsais_prefetch(&T[SA[i - prefetch_distance - 1]]); libsais_prefetch(&T[SA[i - prefetch_distance - 2]]); libsais_prefetch(&T[SA[i - prefetch_distance - 3]]); sa_sint_t p0 = SA[i - 0]; SA[--induction_bucket[BUCKETS_INDEX2(T[p0], 0)]] = p0; sa_sint_t p1 = SA[i - 1]; SA[--induction_bucket[BUCKETS_INDEX2(T[p1], 0)]] = p1; sa_sint_t p2 = SA[i - 2]; SA[--induction_bucket[BUCKETS_INDEX2(T[p2], 0)]] = p2; sa_sint_t p3 = SA[i - 3]; SA[--induction_bucket[BUCKETS_INDEX2(T[p3], 0)]] = p3; } for (j -= prefetch_distance + 3; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[--induction_bucket[BUCKETS_INDEX2(T[p], 0)]] = p; } } static void libsais_radix_sort_lms_suffixes_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536 && m >= 65536 && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_num_threads = 1; #endif if (omp_num_threads == 1) { libsais_radix_sort_lms_suffixes_8u(T, SA, &buckets[4 * ALPHABET_SIZE], (fast_sint_t)n - (fast_sint_t)m + 1, (fast_sint_t)m - 1); } #if defined(_OPENMP) else { { sa_sint_t * RESTRICT src_bucket = &buckets[4 * ALPHABET_SIZE]; sa_sint_t * RESTRICT dst_bucket = thread_state[omp_thread_num].state.buckets; fast_sint_t i, j; for (i = BUCKETS_INDEX2(0, 0), j = BUCKETS_INDEX4(0, 1); i <= BUCKETS_INDEX2(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX2(1, 0), j += BUCKETS_INDEX4(1, 0)) { dst_bucket[i] = src_bucket[i] - dst_bucket[j]; } } { fast_sint_t t, omp_block_start = 0, omp_block_size = thread_state[omp_thread_num].state.m; for (t = omp_num_threads - 1; t >= omp_thread_num; --t) omp_block_start += thread_state[t].state.m; if (omp_block_start == (fast_sint_t)m && omp_block_size > 0) { omp_block_start -= 1; omp_block_size -= 1; } libsais_radix_sort_lms_suffixes_8u(T, SA, thread_state[omp_thread_num].state.buckets, (fast_sint_t)n - omp_block_start, omp_block_size); } } #endif } } static void libsais_radix_sort_lms_suffixes_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 3; i >= j; i -= 4) { libsais_prefetch(&SA[i - 3 * prefetch_distance]); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 0]]); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 1]]); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 2]]); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 3]]); libsais_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 0]]]); libsais_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 1]]]); 
libsais_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 2]]]); libsais_prefetchw(&induction_bucket[T[SA[i - prefetch_distance - 3]]]); sa_sint_t p0 = SA[i - 0]; SA[--induction_bucket[T[p0]]] = p0; sa_sint_t p1 = SA[i - 1]; SA[--induction_bucket[T[p1]]] = p1; sa_sint_t p2 = SA[i - 2]; SA[--induction_bucket[T[p2]]] = p2; sa_sint_t p3 = SA[i - 3]; SA[--induction_bucket[T[p3]]] = p3; } for (j -= 2 * prefetch_distance + 3; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[--induction_bucket[T[p]]] = p; } } static void libsais_radix_sort_lms_suffixes_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 3; i >= j; i -= 4) { libsais_prefetch(&SA[i - 3 * prefetch_distance]); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 0]]); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 1]]); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 2]]); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 3]]); libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 0]], 0)]); libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 1]], 0)]); libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 2]], 0)]); libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(T[SA[i - prefetch_distance - 3]], 0)]); sa_sint_t p0 = SA[i - 0]; SA[--induction_bucket[BUCKETS_INDEX2(T[p0], 0)]] = p0; sa_sint_t p1 = SA[i - 1]; SA[--induction_bucket[BUCKETS_INDEX2(T[p1], 0)]] = p1; sa_sint_t p2 = SA[i - 2]; SA[--induction_bucket[BUCKETS_INDEX2(T[p2], 0)]] = p2; sa_sint_t p3 = SA[i - 3]; SA[--induction_bucket[BUCKETS_INDEX2(T[p3], 0)]] = p3; } for (j -= 2 * prefetch_distance + 3; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[--induction_bucket[BUCKETS_INDEX2(T[p], 0)]] = p; } } #if defined(_OPENMP) static void libsais_radix_sort_lms_suffixes_32s_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais_prefetch(&SA[i + 2 * prefetch_distance]); libsais_prefetch(&T[SA[i + prefetch_distance + 0]]); libsais_prefetch(&T[SA[i + prefetch_distance + 1]]); libsais_prefetch(&T[SA[i + prefetch_distance + 2]]); libsais_prefetch(&T[SA[i + prefetch_distance + 3]]); libsais_prefetchw(&cache[i + prefetch_distance]); cache[i + 0].symbol = T[cache[i + 0].index = SA[i + 0]]; cache[i + 1].symbol = T[cache[i + 1].index = SA[i + 1]]; cache[i + 2].symbol = T[cache[i + 2].index = SA[i + 2]]; cache[i + 3].symbol = T[cache[i + 3].index = SA[i + 3]]; } for (j += prefetch_distance + 3; i < j; i += 1) { cache[i].symbol = T[cache[i].index = SA[i]]; } } static void libsais_radix_sort_lms_suffixes_32s_6k_block_sort(sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4) { libsais_prefetchw(&cache[i - 2 * prefetch_distance]); libsais_prefetchw(&induction_bucket[cache[i - prefetch_distance - 0].symbol]); 
libsais_prefetchw(&induction_bucket[cache[i - prefetch_distance - 1].symbol]); libsais_prefetchw(&induction_bucket[cache[i - prefetch_distance - 2].symbol]); libsais_prefetchw(&induction_bucket[cache[i - prefetch_distance - 3].symbol]); cache[i - 0].symbol = --induction_bucket[cache[i - 0].symbol]; cache[i - 1].symbol = --induction_bucket[cache[i - 1].symbol]; cache[i - 2].symbol = --induction_bucket[cache[i - 2].symbol]; cache[i - 3].symbol = --induction_bucket[cache[i - 3].symbol]; } for (j -= prefetch_distance + 3; i >= j; i -= 1) { cache[i].symbol = --induction_bucket[cache[i].symbol]; } } static void libsais_radix_sort_lms_suffixes_32s_2k_block_sort(sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 3; i >= j; i -= 4) { libsais_prefetchw(&cache[i - 2 * prefetch_distance]); libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 0].symbol, 0)]); libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 1].symbol, 0)]); libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 2].symbol, 0)]); libsais_prefetchw(&induction_bucket[BUCKETS_INDEX2(cache[i - prefetch_distance - 3].symbol, 0)]); cache[i - 0].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 0].symbol, 0)]; cache[i - 1].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 1].symbol, 0)]; cache[i - 2].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 2].symbol, 0)]; cache[i - 3].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i - 3].symbol, 0)]; } for (j -= prefetch_distance + 3; i >= j; i -= 1) { cache[i].symbol = --induction_bucket[BUCKETS_INDEX2(cache[i].symbol, 0)]; } } static void libsais_radix_sort_lms_suffixes_32s_6k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_radix_sort_lms_suffixes_32s_6k(T, SA, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais_radix_sort_lms_suffixes_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { libsais_radix_sort_lms_suffixes_32s_6k_block_sort(induction_bucket, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } } static void libsais_radix_sort_lms_suffixes_32s_2k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_radix_sort_lms_suffixes_32s_2k(T, SA, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais_radix_sort_lms_suffixes_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { libsais_radix_sort_lms_suffixes_32s_2k_block_sort(induction_bucket, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } } #endif static void libsais_radix_sort_lms_suffixes_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || m < 65536) { libsais_radix_sort_lms_suffixes_32s_6k(T, SA, induction_bucket, (fast_sint_t)n - (fast_sint_t)m + 1, (fast_sint_t)m - 1); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < (fast_sint_t)m - 1; block_start = block_end) { block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end >= m) { block_end = (fast_sint_t)m - 1; } libsais_radix_sort_lms_suffixes_32s_6k_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, (fast_sint_t)n - block_end, block_end - block_start, threads); } } #else UNUSED(thread_state); #endif } static void libsais_radix_sort_lms_suffixes_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || m < 65536) { libsais_radix_sort_lms_suffixes_32s_2k(T, SA, induction_bucket, (fast_sint_t)n - (fast_sint_t)m + 1, (fast_sint_t)m - 1); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < (fast_sint_t)m - 1; block_start = block_end) { block_end = block_start + 
(fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end >= m) { block_end = (fast_sint_t)m - 1; } libsais_radix_sort_lms_suffixes_32s_2k_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, (fast_sint_t)n - block_end, block_end - block_start, threads); } } #else UNUSED(thread_state); #endif } static sa_sint_t libsais_radix_sort_lms_suffixes_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets) { const fast_sint_t prefetch_distance = 32; sa_sint_t i = n - 2; sa_sint_t m = 0; fast_uint_t s = 1; fast_sint_t c0 = T[n - 1]; fast_sint_t c1 = 0; fast_sint_t c2 = 0; for (; i >= prefetch_distance + 3; i -= 4) { libsais_prefetch(&T[i - 2 * prefetch_distance]); libsais_prefetchw(&buckets[T[i - prefetch_distance - 0]]); libsais_prefetchw(&buckets[T[i - prefetch_distance - 1]]); libsais_prefetchw(&buckets[T[i - prefetch_distance - 2]]); libsais_prefetchw(&buckets[T[i - prefetch_distance - 3]]); c1 = T[i - 0]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); if ((s & 3) == 1) { SA[--buckets[c2 = c0]] = i + 1; m++; } c0 = T[i - 1]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); if ((s & 3) == 1) { SA[--buckets[c2 = c1]] = i - 0; m++; } c1 = T[i - 2]; s = (s << 1) + (fast_uint_t)(c1 > (c0 - (fast_sint_t)(s & 1))); if ((s & 3) == 1) { SA[--buckets[c2 = c0]] = i - 1; m++; } c0 = T[i - 3]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); if ((s & 3) == 1) { SA[--buckets[c2 = c1]] = i - 2; m++; } } for (; i >= 0; i -= 1) { c1 = c0; c0 = T[i]; s = (s << 1) + (fast_uint_t)(c0 > (c1 - (fast_sint_t)(s & 1))); if ((s & 3) == 1) { SA[--buckets[c2 = c1]] = i + 1; m++; } } if (m > 1) { SA[buckets[c2]] = 0; } return m; } static void libsais_radix_sort_set_markers_32s_6k(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais_prefetch(&induction_bucket[i + 2 * prefetch_distance]); libsais_prefetchw(&SA[induction_bucket[i + prefetch_distance + 0]]); libsais_prefetchw(&SA[induction_bucket[i + prefetch_distance + 1]]); libsais_prefetchw(&SA[induction_bucket[i + prefetch_distance + 2]]); libsais_prefetchw(&SA[induction_bucket[i + prefetch_distance + 3]]); SA[induction_bucket[i + 0]] |= SAINT_MIN; SA[induction_bucket[i + 1]] |= SAINT_MIN; SA[induction_bucket[i + 2]] |= SAINT_MIN; SA[induction_bucket[i + 3]] |= SAINT_MIN; } for (j += prefetch_distance + 3; i < j; i += 1) { SA[induction_bucket[i]] |= SAINT_MIN; } } static void libsais_radix_sort_set_markers_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais_prefetch(&induction_bucket[BUCKETS_INDEX2(i + 2 * prefetch_distance, 0)]); libsais_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 0, 0)]]); libsais_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 1, 0)]]); libsais_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 2, 0)]]); libsais_prefetchw(&SA[induction_bucket[BUCKETS_INDEX2(i + prefetch_distance + 3, 0)]]); SA[induction_bucket[BUCKETS_INDEX2(i + 0, 0)]] |= SUFFIX_GROUP_MARKER; 
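        /* (The three statements below mirror the one above for offsets i + 1 .. i + 3;
           together they appear to tag one SA entry per symbol bucket with
           SUFFIX_GROUP_MARKER so that the later induction scans of the 4k path can
           recognize suffix-group boundaries.) */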
SA[induction_bucket[BUCKETS_INDEX2(i + 1, 0)]] |= SUFFIX_GROUP_MARKER; SA[induction_bucket[BUCKETS_INDEX2(i + 2, 0)]] |= SUFFIX_GROUP_MARKER; SA[induction_bucket[BUCKETS_INDEX2(i + 3, 0)]] |= SUFFIX_GROUP_MARKER; } for (j += prefetch_distance + 3; i < j; i += 1) { SA[induction_bucket[BUCKETS_INDEX2(i, 0)]] |= SUFFIX_GROUP_MARKER; } } static void libsais_radix_sort_set_markers_32s_6k_omp(sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && k >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); fast_sint_t omp_block_stride = (((fast_sint_t)k - 1) / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : (fast_sint_t)k - 1 - omp_block_start; #else UNUSED(threads); fast_sint_t omp_block_start = 0; fast_sint_t omp_block_size = (fast_sint_t)k - 1; #endif libsais_radix_sort_set_markers_32s_6k(SA, induction_bucket, omp_block_start, omp_block_size); } } static void libsais_radix_sort_set_markers_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && k >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); fast_sint_t omp_block_stride = (((fast_sint_t)k - 1) / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : (fast_sint_t)k - 1 - omp_block_start; #else UNUSED(threads); fast_sint_t omp_block_start = 0; fast_sint_t omp_block_size = (fast_sint_t)k - 1; #endif libsais_radix_sort_set_markers_32s_4k(SA, induction_bucket, omp_block_start, omp_block_size); } } static void libsais_initialize_buckets_for_partial_sorting_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count) { sa_sint_t * RESTRICT temp_bucket = &buckets[4 * ALPHABET_SIZE]; buckets[BUCKETS_INDEX4((fast_uint_t)T[first_lms_suffix], 1)]++; fast_sint_t i, j; sa_sint_t sum0 = left_suffixes_count + 1, sum1 = 0; for (i = BUCKETS_INDEX4(0, 0), j = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX4(ALPHABET_SIZE - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0)) { temp_bucket[j + BUCKETS_INDEX2(0, 0)] = sum0; sum0 += buckets[i + BUCKETS_INDEX4(0, 0)] + buckets[i + BUCKETS_INDEX4(0, 2)]; sum1 += buckets[i + BUCKETS_INDEX4(0, 1)]; buckets[j + BUCKETS_INDEX2(0, 0)] = sum0; buckets[j + BUCKETS_INDEX2(0, 1)] = sum1; } } static void libsais_initialize_buckets_for_partial_sorting_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count) { sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k]; fast_sint_t i, j; sa_sint_t sum0 = left_suffixes_count + 1, sum1 = 0, sum2 = 0; for (first_lms_suffix = T[first_lms_suffix], i = BUCKETS_INDEX4(0, 0), j = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX4((fast_sint_t)first_lms_suffix - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0)) { sa_sint_t SS = buckets[i + BUCKETS_INDEX4(0, 0)]; sa_sint_t LS = buckets[i + BUCKETS_INDEX4(0, 1)]; sa_sint_t SL = buckets[i + BUCKETS_INDEX4(0, 2)]; sa_sint_t LL = buckets[i + BUCKETS_INDEX4(0, 3)]; buckets[i + BUCKETS_INDEX4(0, 0)] = sum0; buckets[i + BUCKETS_INDEX4(0, 1)] = sum2; buckets[i + BUCKETS_INDEX4(0, 2)] = 0; buckets[i + BUCKETS_INDEX4(0, 3)] = 0; sum0 += SS + SL; sum1 += LS; sum2 += LS + LL; temp_bucket[j + BUCKETS_INDEX2(0, 0)] = sum0; temp_bucket[j + BUCKETS_INDEX2(0, 1)] = sum1; } for (sum1 += 1; i <= BUCKETS_INDEX4((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX4(1, 0), j += BUCKETS_INDEX2(1, 0)) { sa_sint_t SS = buckets[i + BUCKETS_INDEX4(0, 0)]; sa_sint_t LS = buckets[i + BUCKETS_INDEX4(0, 1)]; sa_sint_t SL = buckets[i + BUCKETS_INDEX4(0, 2)]; sa_sint_t LL = buckets[i + BUCKETS_INDEX4(0, 3)]; buckets[i + BUCKETS_INDEX4(0, 0)] = sum0; buckets[i + BUCKETS_INDEX4(0, 1)] = sum2; buckets[i + BUCKETS_INDEX4(0, 2)] = 0; buckets[i + BUCKETS_INDEX4(0, 3)] = 0; sum0 += SS + SL; sum1 += LS; sum2 += LS + LL; temp_bucket[j + BUCKETS_INDEX2(0, 0)] = sum0; temp_bucket[j + BUCKETS_INDEX2(0, 1)] = sum1; } } static sa_sint_t libsais_partial_sorting_scan_left_to_right_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[4 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetch(&SA[i + 2 * prefetch_distance]); libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2); libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + prefetch_distance + 1] & 
SAINT_MAX] - 2); sa_sint_t p0 = SA[i + 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] >= T[p0 - 1]); SA[induction_bucket[v0]++] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d; sa_sint_t p1 = SA[i + 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] >= T[p1 - 1]); SA[induction_bucket[v1]++] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] >= T[p - 1]); SA[induction_bucket[v]++] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d; } return d; } #if defined(_OPENMP) static void libsais_partial_sorting_scan_left_to_right_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size, LIBSAIS_THREAD_STATE * RESTRICT state) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; sa_sint_t d = 1; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetch(&SA[i + 2 * prefetch_distance]); libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2); libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2); sa_sint_t p0 = cache[count].index = SA[i + 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = cache[count++].symbol = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] >= T[p0 - 1]); induction_bucket[v0]++; distinct_names[v0] = d; sa_sint_t p1 = cache[count].index = SA[i + 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = cache[count++].symbol = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] >= T[p1 - 1]); induction_bucket[v1]++; distinct_names[v1] = d; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = cache[count].index = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = cache[count++].symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] >= T[p - 1]); induction_bucket[v]++; distinct_names[v] = d; } state[0].state.position = (fast_sint_t)d - 1; state[0].state.count = count; } static void libsais_partial_sorting_scan_left_to_right_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count, sa_sint_t d) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t i, j; for (i = 0, j = count - 1; i < j; i += 2) { libsais_prefetch(&cache[i + prefetch_distance]); sa_sint_t p0 = cache[i + 0].index; d += (p0 < 0); sa_sint_t v0 = cache[i + 0].symbol; SA[induction_bucket[v0]++] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d; sa_sint_t p1 = cache[i + 1].index; d += (p1 < 0); sa_sint_t v1 = cache[i + 1].symbol; SA[induction_bucket[v1]++] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d; } for (j += 1; i < j; i += 1) { sa_sint_t p 
= cache[i].index; d += (p < 0); sa_sint_t v = cache[i].symbol; SA[induction_bucket[v]++] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d; } } static sa_sint_t libsais_partial_sorting_scan_left_to_right_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { d = libsais_partial_sorting_scan_left_to_right_8u(T, SA, buckets, d, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais_partial_sorting_scan_left_to_right_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size, &thread_state[omp_thread_num]); } #pragma omp barrier #pragma omp master { sa_sint_t * RESTRICT induction_bucket = &buckets[4 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t t; for (t = 0; t < omp_num_threads; ++t) { sa_sint_t * RESTRICT temp_induction_bucket = &thread_state[t].state.buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT temp_distinct_names = &thread_state[t].state.buckets[2 * ALPHABET_SIZE]; fast_sint_t c; for (c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_induction_bucket[c]; induction_bucket[c] = A + B; temp_induction_bucket[c] = A; } for (d -= 1, c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = distinct_names[c], B = temp_distinct_names[c], D = B + d; distinct_names[c] = B > 0 ? 
D : A; temp_distinct_names[c] = A; } d += 1 + (sa_sint_t)thread_state[t].state.position; thread_state[t].state.position = (fast_sint_t)d - thread_state[t].state.position; } } #pragma omp barrier { libsais_partial_sorting_scan_left_to_right_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count, (sa_sint_t)thread_state[omp_thread_num].state.position); } } #endif } return d; } #endif static sa_sint_t libsais_partial_sorting_scan_left_to_right_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t * RESTRICT induction_bucket = &buckets[4 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; SA[induction_bucket[BUCKETS_INDEX2(T[n - 1], T[n - 2] >= T[n - 1])]++] = (n - 1) | SAINT_MIN; distinct_names[BUCKETS_INDEX2(T[n - 1], T[n - 2] >= T[n - 1])] = ++d; if (threads == 1 || left_suffixes_count < 65536) { d = libsais_partial_sorting_scan_left_to_right_8u(T, SA, buckets, d, 0, left_suffixes_count); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = 0; block_start < left_suffixes_count; ) { if (SA[block_start] == 0) { block_start++; } else { fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > left_suffixes_count) { block_max_end = left_suffixes_count;} fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; } fast_sint_t block_size = block_end - block_start; if (block_size < 32) { for (; block_start < block_end; block_start += 1) { sa_sint_t p = SA[block_start]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] >= T[p - 1]); SA[induction_bucket[v]++] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d; } } else { d = libsais_partial_sorting_scan_left_to_right_8u_block_omp(T, SA, buckets, d, block_start, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif return d; } static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2) { libsais_prefetch(&SA[i + 3 * prefetch_distance]); libsais_prefetch(&T[SA[i + 2 * prefetch_distance + 0] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + 2 * prefetch_distance + 0] & SAINT_MAX] - 2); libsais_prefetch(&T[SA[i + 2 * prefetch_distance + 1] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + 2 * prefetch_distance + 1] & SAINT_MAX] - 2); sa_sint_t p0 = SA[i + prefetch_distance + 0] & SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX4(T[p0 - (p0 > 0)], 0); libsais_prefetchw(&buckets[v0]); sa_sint_t p1 = SA[i + prefetch_distance + 1] & SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX4(T[p1 - (p1 > 0)], 0); libsais_prefetchw(&buckets[v1]); sa_sint_t p2 = SA[i + 0]; d += (p2 < 0); p2 &= SAINT_MAX; sa_sint_t v2 = BUCKETS_INDEX4(T[p2 - 1], T[p2 - 2] >= T[p2 - 1]); SA[buckets[v2]++] = (p2 - 1) | ((sa_sint_t)(buckets[2 + v2] != d) << (SAINT_BIT - 1)); buckets[2 + v2] = d; sa_sint_t p3 = SA[i + 1]; d 
+= (p3 < 0); p3 &= SAINT_MAX; sa_sint_t v3 = BUCKETS_INDEX4(T[p3 - 1], T[p3 - 2] >= T[p3 - 1]); SA[buckets[v3]++] = (p3 - 1) | ((sa_sint_t)(buckets[2 + v3] != d) << (SAINT_BIT - 1)); buckets[2 + v3] = d; } for (j += 2 * prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX4(T[p - 1], T[p - 2] >= T[p - 1]); SA[buckets[v]++] = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d; } return d; } static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[2 * k]; sa_sint_t * RESTRICT distinct_names = &buckets[0 * k]; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 3 * prefetch_distance]); sa_sint_t s0 = SA[i + 2 * prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + 2 * prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t s2 = SA[i + 1 * prefetch_distance + 0]; if (s2 > 0) { const fast_sint_t Ts2 = T[(s2 & ~SUFFIX_GROUP_MARKER) - 1]; libsais_prefetchw(&induction_bucket[Ts2]); libsais_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts2, 0)]); } sa_sint_t s3 = SA[i + 1 * prefetch_distance + 1]; if (s3 > 0) { const fast_sint_t Ts3 = T[(s3 & ~SUFFIX_GROUP_MARKER) - 1]; libsais_prefetchw(&induction_bucket[Ts3]); libsais_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts3, 0)]); } sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { SA[i + 0] = 0; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); p0 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] < T[p0 - 1]); SA[induction_bucket[T[p0 - 1]]++] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] < T[p0 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d; } sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { SA[i + 1] = 0; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); p1 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] < T[p1 - 1]); SA[induction_bucket[T[p1 - 1]]++] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] < T[p1 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d; } } for (j += 2 * prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { SA[i] = 0; d += (p >> (SUFFIX_GROUP_BIT - 1)); p &= ~SUFFIX_GROUP_MARKER; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] < T[p - 1]); SA[induction_bucket[T[p - 1]]++] = (p - 1) | ((sa_sint_t)(T[p - 2] < T[p - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d; } } return d; } static void libsais_partial_sorting_scan_left_to_right_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i 
+= 2) { libsais_prefetchw(&SA[i + 3 * prefetch_distance]); sa_sint_t s0 = SA[i + 2 * prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + 2 * prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t s2 = SA[i + 1 * prefetch_distance + 0]; if (s2 > 0) { libsais_prefetchw(&induction_bucket[T[s2 - 1]]); libsais_prefetch(&T[s2] - 2); } sa_sint_t s3 = SA[i + 1 * prefetch_distance + 1]; if (s3 > 0) { libsais_prefetchw(&induction_bucket[T[s3 - 1]]); libsais_prefetch(&T[s3] - 2); } sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { SA[i + 0] = 0; SA[induction_bucket[T[p0 - 1]]++] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] < T[p0 - 1]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { SA[i + 1] = 0; SA[induction_bucket[T[p1 - 1]]++] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] < T[p1 - 1]) << (SAINT_BIT - 1)); } } for (j += 2 * prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { SA[i] = 0; SA[induction_bucket[T[p - 1]]++] = (p - 1) | ((sa_sint_t)(T[p - 2] < T[p - 1]) << (SAINT_BIT - 1)); } } } #if defined(_OPENMP) static void libsais_partial_sorting_scan_left_to_right_32s_6k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetch(&SA[i + 2 * prefetch_distance]); libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2); libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2); libsais_prefetchw(&cache[i + prefetch_distance]); sa_sint_t p0 = cache[i + 0].index = SA[i + 0]; sa_sint_t symbol0 = 0; p0 &= SAINT_MAX; if (p0 != 0) { symbol0 = BUCKETS_INDEX4(T[p0 - 1], T[p0 - 2] >= T[p0 - 1]); } cache[i + 0].symbol = symbol0; sa_sint_t p1 = cache[i + 1].index = SA[i + 1]; sa_sint_t symbol1 = 0; p1 &= SAINT_MAX; if (p1 != 0) { symbol1 = BUCKETS_INDEX4(T[p1 - 1], T[p1 - 2] >= T[p1 - 1]); } cache[i + 1].symbol = symbol1; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = cache[i].index = SA[i]; sa_sint_t symbol = 0; p &= SAINT_MAX; if (p != 0) { symbol = BUCKETS_INDEX4(T[p - 1], T[p - 2] >= T[p - 1]); } cache[i].symbol = symbol; } } static void libsais_partial_sorting_scan_left_to_right_32s_4k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? 
Ts1 : NULL); libsais_prefetchw(&cache[i + prefetch_distance]); sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { cache[i + 0].index = p0; p0 &= ~SUFFIX_GROUP_MARKER; symbol0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] < T[p0 - 1]); p0 = 0; } cache[i + 0].symbol = symbol0; SA[i + 0] = p0 & SAINT_MAX; sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { cache[i + 1].index = p1; p1 &= ~SUFFIX_GROUP_MARKER; symbol1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] < T[p1 - 1]); p1 = 0; } cache[i + 1].symbol = symbol1; SA[i + 1] = p1 & SAINT_MAX; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { cache[i].index = p; p &= ~SUFFIX_GROUP_MARKER; symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] < T[p - 1]); p = 0; } cache[i].symbol = symbol; SA[i] = p & SAINT_MAX; } } static void libsais_partial_sorting_scan_left_to_right_32s_1k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL); libsais_prefetchw(&cache[i + prefetch_distance]); sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { cache[i + 0].index = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] < T[p0 - 1]) << (SAINT_BIT - 1)); symbol0 = T[p0 - 1]; p0 = 0; } cache[i + 0].symbol = symbol0; SA[i + 0] = p0 & SAINT_MAX; sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { cache[i + 1].index = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] < T[p1 - 1]) << (SAINT_BIT - 1)); symbol1 = T[p1 - 1]; p1 = 0; } cache[i + 1].symbol = symbol1; SA[i + 1] = p1 & SAINT_MAX; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { cache[i].index = (p - 1) | ((sa_sint_t)(T[p - 2] < T[p - 1]) << (SAINT_BIT - 1)); symbol = T[p - 1]; p = 0; } cache[i].symbol = symbol; SA[i] = p & SAINT_MAX; } } static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_6k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size; for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&cache[i + 2 * prefetch_distance]); libsais_prefetchw(&buckets[cache[i + prefetch_distance + 0].symbol]); libsais_prefetchw(&buckets[cache[i + prefetch_distance + 1].symbol]); sa_sint_t v0 = cache[i + 0].symbol, p0 = cache[i + 0].index; d += (p0 < 0); cache[i + 0].symbol = buckets[v0]++; cache[i + 0].index = (p0 - 1) | ((sa_sint_t)(buckets[2 + v0] != d) << (SAINT_BIT - 1)); buckets[2 + v0] = d; if (cache[i + 0].symbol < omp_block_end) { sa_sint_t s = cache[i + 0].symbol, q = (cache[s].index = cache[i + 0].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] >= T[q - 1]); } sa_sint_t v1 = cache[i + 1].symbol, p1 = cache[i + 1].index; d += (p1 < 0); cache[i + 1].symbol 
= buckets[v1]++; cache[i + 1].index = (p1 - 1) | ((sa_sint_t)(buckets[2 + v1] != d) << (SAINT_BIT - 1)); buckets[2 + v1] = d; if (cache[i + 1].symbol < omp_block_end) { sa_sint_t s = cache[i + 1].symbol, q = (cache[s].index = cache[i + 1].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] >= T[q - 1]); } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t v = cache[i].symbol, p = cache[i].index; d += (p < 0); cache[i].symbol = buckets[v]++; cache[i].index = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d; if (cache[i].symbol < omp_block_end) { sa_sint_t s = cache[i].symbol, q = (cache[s].index = cache[i].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] >= T[q - 1]); } } return d; } static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_4k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[2 * k]; sa_sint_t * RESTRICT distinct_names = &buckets[0 * k]; fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size; for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&cache[i + 2 * prefetch_distance]); sa_sint_t s0 = cache[i + prefetch_distance + 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0 >> 1]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL); const sa_sint_t * Ds0 = &distinct_names[s0]; libsais_prefetchw(s0 >= 0 ? Ds0 : NULL); sa_sint_t s1 = cache[i + prefetch_distance + 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1 >> 1]; libsais_prefetchw(s1 >= 0 ? Is1 : NULL); const sa_sint_t * Ds1 = &distinct_names[s1]; libsais_prefetchw(s1 >= 0 ? 
Ds1 : NULL); sa_sint_t v0 = cache[i + 0].symbol; if (v0 >= 0) { sa_sint_t p0 = cache[i + 0].index; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); cache[i + 0].symbol = induction_bucket[v0 >> 1]++; cache[i + 0].index = (p0 - 1) | (v0 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d; if (cache[i + 0].symbol < omp_block_end) { sa_sint_t ni = cache[i + 0].symbol, np = cache[i + 0].index; if (np > 0) { cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] < T[np - 1]); np = 0; } cache[i + 0].index = np & SAINT_MAX; } } sa_sint_t v1 = cache[i + 1].symbol; if (v1 >= 0) { sa_sint_t p1 = cache[i + 1].index; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); cache[i + 1].symbol = induction_bucket[v1 >> 1]++; cache[i + 1].index = (p1 - 1) | (v1 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d; if (cache[i + 1].symbol < omp_block_end) { sa_sint_t ni = cache[i + 1].symbol, np = cache[i + 1].index; if (np > 0) { cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] < T[np - 1]); np = 0; } cache[i + 1].index = np & SAINT_MAX; } } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t v = cache[i].symbol; if (v >= 0) { sa_sint_t p = cache[i].index; d += (p >> (SUFFIX_GROUP_BIT - 1)); cache[i].symbol = induction_bucket[v >> 1]++; cache[i].index = (p - 1) | (v << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d; if (cache[i].symbol < omp_block_end) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] < T[np - 1]); np = 0; } cache[i].index = np & SAINT_MAX; } } } return d; } static void libsais_partial_sorting_scan_left_to_right_32s_1k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size; for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&cache[i + 2 * prefetch_distance]); sa_sint_t s0 = cache[i + prefetch_distance + 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL); sa_sint_t s1 = cache[i + prefetch_distance + 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais_prefetchw(s1 >= 0 ? 
Is1 : NULL); sa_sint_t v0 = cache[i + 0].symbol; if (v0 >= 0) { cache[i + 0].symbol = induction_bucket[v0]++; if (cache[i + 0].symbol < omp_block_end) { sa_sint_t ni = cache[i + 0].symbol, np = cache[i + 0].index; if (np > 0) { cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] < T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; np = 0; } cache[i + 0].index = np & SAINT_MAX; } } sa_sint_t v1 = cache[i + 1].symbol; if (v1 >= 0) { cache[i + 1].symbol = induction_bucket[v1]++; if (cache[i + 1].symbol < omp_block_end) { sa_sint_t ni = cache[i + 1].symbol, np = cache[i + 1].index; if (np > 0) { cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] < T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; np = 0; } cache[i + 1].index = np & SAINT_MAX; } } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t v = cache[i].symbol; if (v >= 0) { cache[i].symbol = induction_bucket[v]++; if (cache[i].symbol < omp_block_end) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] < T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; np = 0; } cache[i].index = np & SAINT_MAX; } } } } static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_6k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { d = libsais_partial_sorting_scan_left_to_right_32s_6k(T, SA, buckets, d, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais_partial_sorting_scan_left_to_right_32s_6k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { d = libsais_partial_sorting_scan_left_to_right_32s_6k_block_sort(T, buckets, d, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } return d; } static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_4k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { d = libsais_partial_sorting_scan_left_to_right_32s_4k(T, SA, k, buckets, d, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais_partial_sorting_scan_left_to_right_32s_4k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { d = libsais_partial_sorting_scan_left_to_right_32s_4k_block_sort(T, k, buckets, d, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } return d; } static void libsais_partial_sorting_scan_left_to_right_32s_1k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_partial_sorting_scan_left_to_right_32s_1k(T, SA, buckets, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais_partial_sorting_scan_left_to_right_32s_1k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { libsais_partial_sorting_scan_left_to_right_32s_1k_block_sort(T, buckets, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } } #endif static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[buckets[BUCKETS_INDEX4(T[n - 1], T[n - 2] >= T[n - 1])]++] = (n - 1) | SAINT_MIN; buckets[2 + BUCKETS_INDEX4(T[n - 1], T[n - 2] >= T[n - 1])] = ++d; if (threads == 1 || left_suffixes_count < 65536) { d = libsais_partial_sorting_scan_left_to_right_32s_6k(T, SA, buckets, d, 0, left_suffixes_count); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < left_suffixes_count; block_start = block_end) { block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > left_suffixes_count) { block_end = left_suffixes_count; } d = libsais_partial_sorting_scan_left_to_right_32s_6k_block_omp(T, SA, buckets, d, thread_state[0].state.cache, block_start, block_end - block_start, threads); } } #else UNUSED(thread_state); #endif return d; } static sa_sint_t libsais_partial_sorting_scan_left_to_right_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { 
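    /* Summary of the driver below: the last suffix n - 1 is seeded into the bucket of
       T[n - 1], then the left-to-right induction scan of the 4k-bucket 32-bit path
       either runs serially (threads == 1 or n < 65536) or is cut into blocks of
       threads * LIBSAIS_PER_THREAD_CACHE_SIZE entries handled by the OpenMP
       gather / sort / place helpers defined above. The running counter d appears to
       number suffix groups, so the distinct_names slots can flag entries whose group
       has not yet been seen for a given bucket. */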
sa_sint_t * RESTRICT induction_bucket = &buckets[2 * k]; sa_sint_t * RESTRICT distinct_names = &buckets[0 * k]; SA[induction_bucket[T[n - 1]]++] = (n - 1) | ((sa_sint_t)(T[n - 2] < T[n - 1]) << (SAINT_BIT - 1)) | SUFFIX_GROUP_MARKER; distinct_names[BUCKETS_INDEX2(T[n - 1], T[n - 2] < T[n - 1])] = ++d; if (threads == 1 || n < 65536) { d = libsais_partial_sorting_scan_left_to_right_32s_4k(T, SA, k, buckets, d, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < n; block_start = block_end) { block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > n) { block_end = n; } d = libsais_partial_sorting_scan_left_to_right_32s_4k_block_omp(T, SA, k, buckets, d, thread_state[0].state.cache, block_start, block_end - block_start, threads); } } #else UNUSED(thread_state); #endif return d; } static void libsais_partial_sorting_scan_left_to_right_32s_1k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[buckets[T[n - 1]]++] = (n - 1) | ((sa_sint_t)(T[n - 2] < T[n - 1]) << (SAINT_BIT - 1)); if (threads == 1 || n < 65536) { libsais_partial_sorting_scan_left_to_right_32s_1k(T, SA, buckets, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < n; block_start = block_end) { block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > n) { block_end = n; } libsais_partial_sorting_scan_left_to_right_32s_1k_block_omp(T, SA, buckets, thread_state[0].state.cache, block_start, block_end - block_start, threads); } } #else UNUSED(thread_state); #endif } static void libsais_partial_sorting_shift_markers_8u_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, const sa_sint_t * RESTRICT buckets, sa_sint_t threads) { const fast_sint_t prefetch_distance = 32; const sa_sint_t * RESTRICT temp_bucket = &buckets[4 * ALPHABET_SIZE]; fast_sint_t c; #if defined(_OPENMP) #pragma omp parallel for schedule(static, 1) num_threads(threads) if(threads > 1 && n >= 65536) #else UNUSED(threads); UNUSED(n); #endif for (c = BUCKETS_INDEX2(ALPHABET_SIZE - 1, 0); c >= BUCKETS_INDEX2(1, 0); c -= BUCKETS_INDEX2(1, 0)) { fast_sint_t i, j; sa_sint_t s = SAINT_MIN; for (i = (fast_sint_t)temp_bucket[c] - 1, j = (fast_sint_t)buckets[c - BUCKETS_INDEX2(1, 0)] + 3; i >= j; i -= 4) { libsais_prefetchw(&SA[i - prefetch_distance]); sa_sint_t p0 = SA[i - 0], q0 = (p0 & SAINT_MIN) ^ s; s = s ^ q0; SA[i - 0] = p0 ^ q0; sa_sint_t p1 = SA[i - 1], q1 = (p1 & SAINT_MIN) ^ s; s = s ^ q1; SA[i - 1] = p1 ^ q1; sa_sint_t p2 = SA[i - 2], q2 = (p2 & SAINT_MIN) ^ s; s = s ^ q2; SA[i - 2] = p2 ^ q2; sa_sint_t p3 = SA[i - 3], q3 = (p3 & SAINT_MIN) ^ s; s = s ^ q3; SA[i - 3] = p3 ^ q3; } for (j -= 3; i >= j; i -= 1) { sa_sint_t p = SA[i], q = (p & SAINT_MIN) ^ s; s = s ^ q; SA[i] = p ^ q; } } } static void libsais_partial_sorting_shift_markers_32s_6k_omp(sa_sint_t * RESTRICT SA, sa_sint_t k, const sa_sint_t * RESTRICT buckets, sa_sint_t threads) { const fast_sint_t prefetch_distance = 32; const sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k]; fast_sint_t c; #if defined(_OPENMP) #pragma omp parallel for schedule(static, 1) num_threads(threads) if(threads > 1 && k >= 65536) #else UNUSED(threads); #endif for (c = (fast_sint_t)k - 1; c >= 1; c -= 1) { fast_sint_t i, j; sa_sint_t s = SAINT_MIN; for (i = (fast_sint_t)buckets[BUCKETS_INDEX4(c, 0)] - 1, j = 
(fast_sint_t)temp_bucket[BUCKETS_INDEX2(c - 1, 0)] + 3; i >= j; i -= 4) { libsais_prefetchw(&SA[i - prefetch_distance]); sa_sint_t p0 = SA[i - 0], q0 = (p0 & SAINT_MIN) ^ s; s = s ^ q0; SA[i - 0] = p0 ^ q0; sa_sint_t p1 = SA[i - 1], q1 = (p1 & SAINT_MIN) ^ s; s = s ^ q1; SA[i - 1] = p1 ^ q1; sa_sint_t p2 = SA[i - 2], q2 = (p2 & SAINT_MIN) ^ s; s = s ^ q2; SA[i - 2] = p2 ^ q2; sa_sint_t p3 = SA[i - 3], q3 = (p3 & SAINT_MIN) ^ s; s = s ^ q3; SA[i - 3] = p3 ^ q3; } for (j -= 3; i >= j; i -= 1) { sa_sint_t p = SA[i], q = (p & SAINT_MIN) ^ s; s = s ^ q; SA[i] = p ^ q; } } } static void libsais_partial_sorting_shift_markers_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t n) { const fast_sint_t prefetch_distance = 32; fast_sint_t i; sa_sint_t s = SUFFIX_GROUP_MARKER; for (i = (fast_sint_t)n - 1; i >= 3; i -= 4) { libsais_prefetchw(&SA[i - prefetch_distance]); sa_sint_t p0 = SA[i - 0], q0 = ((p0 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p0 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q0; SA[i - 0] = p0 ^ q0; sa_sint_t p1 = SA[i - 1], q1 = ((p1 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p1 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q1; SA[i - 1] = p1 ^ q1; sa_sint_t p2 = SA[i - 2], q2 = ((p2 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p2 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q2; SA[i - 2] = p2 ^ q2; sa_sint_t p3 = SA[i - 3], q3 = ((p3 & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p3 > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q3; SA[i - 3] = p3 ^ q3; } for (; i >= 0; i -= 1) { sa_sint_t p = SA[i], q = ((p & SUFFIX_GROUP_MARKER) ^ s) & ((sa_sint_t)(p > 0) << ((SUFFIX_GROUP_BIT - 1))); s = s ^ q; SA[i] = p ^ q; } } static void libsais_partial_sorting_shift_buckets_32s_6k(sa_sint_t k, sa_sint_t * RESTRICT buckets) { sa_sint_t * RESTRICT temp_bucket = &buckets[4 * k]; fast_sint_t i; for (i = BUCKETS_INDEX2(0, 0); i <= BUCKETS_INDEX2((fast_sint_t)k - 1, 0); i += BUCKETS_INDEX2(1, 0)) { buckets[2 * i + BUCKETS_INDEX4(0, 0)] = temp_bucket[i + BUCKETS_INDEX2(0, 0)]; buckets[2 * i + BUCKETS_INDEX4(0, 1)] = temp_bucket[i + BUCKETS_INDEX2(0, 1)]; } } static sa_sint_t libsais_partial_sorting_scan_right_to_left_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetch(&SA[i - 2 * prefetch_distance]); libsais_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 2); libsais_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 2); sa_sint_t p0 = SA[i - 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); SA[--induction_bucket[v0]] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d; sa_sint_t p1 = SA[i - 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); SA[--induction_bucket[v1]] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d; } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] 
> T[p - 1]); SA[--induction_bucket[v]] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d; } return d; } #if defined(_OPENMP) static void libsais_partial_sorting_scan_right_to_left_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size, LIBSAIS_THREAD_STATE * RESTRICT state) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; memset(buckets, 0, 4 * ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; sa_sint_t d = 1; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetch(&SA[i - 2 * prefetch_distance]); libsais_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i - prefetch_distance - 0] & SAINT_MAX] - 2); libsais_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i - prefetch_distance - 1] & SAINT_MAX] - 2); sa_sint_t p0 = cache[count].index = SA[i - 0]; d += (p0 < 0); p0 &= SAINT_MAX; sa_sint_t v0 = cache[count++].symbol = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); induction_bucket[v0]++; distinct_names[v0] = d; sa_sint_t p1 = cache[count].index = SA[i - 1]; d += (p1 < 0); p1 &= SAINT_MAX; sa_sint_t v1 = cache[count++].symbol = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); induction_bucket[v1]++; distinct_names[v1] = d; } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = cache[count].index = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = cache[count++].symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]); induction_bucket[v]++; distinct_names[v] = d; } state[0].state.position = (fast_sint_t)d - 1; state[0].state.count = count; } static void libsais_partial_sorting_scan_right_to_left_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count, sa_sint_t d) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t i, j; for (i = 0, j = count - 1; i < j; i += 2) { libsais_prefetch(&cache[i + prefetch_distance]); sa_sint_t p0 = cache[i + 0].index; d += (p0 < 0); sa_sint_t v0 = cache[i + 0].symbol; SA[--induction_bucket[v0]] = (p0 - 1) | ((sa_sint_t)(distinct_names[v0] != d) << (SAINT_BIT - 1)); distinct_names[v0] = d; sa_sint_t p1 = cache[i + 1].index; d += (p1 < 0); sa_sint_t v1 = cache[i + 1].symbol; SA[--induction_bucket[v1]] = (p1 - 1) | ((sa_sint_t)(distinct_names[v1] != d) << (SAINT_BIT - 1)); distinct_names[v1] = d; } for (j += 1; i < j; i += 1) { sa_sint_t p = cache[i].index; d += (p < 0); sa_sint_t v = cache[i].symbol; SA[--induction_bucket[v]] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d; } } static sa_sint_t libsais_partial_sorting_scan_right_to_left_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t 
omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { d = libsais_partial_sorting_scan_right_to_left_8u(T, SA, buckets, d, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais_partial_sorting_scan_right_to_left_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size, &thread_state[omp_thread_num]); } #pragma omp barrier #pragma omp master { sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t t; for (t = omp_num_threads - 1; t >= 0; --t) { sa_sint_t * RESTRICT temp_induction_bucket = &thread_state[t].state.buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT temp_distinct_names = &thread_state[t].state.buckets[2 * ALPHABET_SIZE]; fast_sint_t c; for (c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_induction_bucket[c]; induction_bucket[c] = A - B; temp_induction_bucket[c] = A; } for (d -= 1, c = 0; c < 2 * ALPHABET_SIZE; c += 1) { sa_sint_t A = distinct_names[c], B = temp_distinct_names[c], D = B + d; distinct_names[c] = B > 0 ? D : A; temp_distinct_names[c] = A; } d += 1 + (sa_sint_t)thread_state[t].state.position; thread_state[t].state.position = (fast_sint_t)d - thread_state[t].state.position; } } #pragma omp barrier { libsais_partial_sorting_scan_right_to_left_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count, (sa_sint_t)thread_state[omp_thread_num].state.position); } } #endif } return d; } #endif static void libsais_partial_sorting_scan_right_to_left_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { fast_sint_t scan_start = (fast_sint_t)left_suffixes_count + 1; fast_sint_t scan_end = (fast_sint_t)n - (fast_sint_t)first_lms_suffix; if (threads == 1 || (scan_end - scan_start) < 65536) { libsais_partial_sorting_scan_right_to_left_8u(T, SA, buckets, d, scan_start, scan_end - scan_start); } #if defined(_OPENMP) else { sa_sint_t * RESTRICT induction_bucket = &buckets[0 * ALPHABET_SIZE]; sa_sint_t * RESTRICT distinct_names = &buckets[2 * ALPHABET_SIZE]; fast_sint_t block_start; for (block_start = scan_end - 1; block_start >= scan_start; ) { if (SA[block_start] == 0) { block_start--; } else { fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end < scan_start) { block_max_end = scan_start - 1; } fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; } fast_sint_t block_size = block_start - block_end; if (block_size < 32) { for (; block_start > block_end; block_start -= 1) { sa_sint_t p = SA[block_start]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 
2] > T[p - 1]); SA[--induction_bucket[v]] = (p - 1) | ((sa_sint_t)(distinct_names[v] != d) << (SAINT_BIT - 1)); distinct_names[v] = d; } } else { d = libsais_partial_sorting_scan_right_to_left_8u_block_omp(T, SA, buckets, d, block_end + 1, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif } static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetch(&SA[i - 3 * prefetch_distance]); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 0] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 0] & SAINT_MAX] - 2); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 1] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i - 2 * prefetch_distance - 1] & SAINT_MAX] - 2); sa_sint_t p0 = SA[i - prefetch_distance - 0] & SAINT_MAX; sa_sint_t v0 = BUCKETS_INDEX4(T[p0 - (p0 > 0)], 0); libsais_prefetchw(&buckets[v0]); sa_sint_t p1 = SA[i - prefetch_distance - 1] & SAINT_MAX; sa_sint_t v1 = BUCKETS_INDEX4(T[p1 - (p1 > 0)], 0); libsais_prefetchw(&buckets[v1]); sa_sint_t p2 = SA[i - 0]; d += (p2 < 0); p2 &= SAINT_MAX; sa_sint_t v2 = BUCKETS_INDEX4(T[p2 - 1], T[p2 - 2] > T[p2 - 1]); SA[--buckets[v2]] = (p2 - 1) | ((sa_sint_t)(buckets[2 + v2] != d) << (SAINT_BIT - 1)); buckets[2 + v2] = d; sa_sint_t p3 = SA[i - 1]; d += (p3 < 0); p3 &= SAINT_MAX; sa_sint_t v3 = BUCKETS_INDEX4(T[p3 - 1], T[p3 - 2] > T[p3 - 1]); SA[--buckets[v3]] = (p3 - 1) | ((sa_sint_t)(buckets[2 + v3] != d) << (SAINT_BIT - 1)); buckets[2 + v3] = d; } for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; d += (p < 0); p &= SAINT_MAX; sa_sint_t v = BUCKETS_INDEX4(T[p - 1], T[p - 2] > T[p - 1]); SA[--buckets[v]] = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d; } return d; } static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[3 * k]; sa_sint_t * RESTRICT distinct_names = &buckets[0 * k]; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&SA[i - 3 * prefetch_distance]); sa_sint_t s0 = SA[i - 2 * prefetch_distance - 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - 2 * prefetch_distance - 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? 
Ts1 : NULL); sa_sint_t s2 = SA[i - 1 * prefetch_distance - 0]; if (s2 > 0) { const fast_sint_t Ts2 = T[(s2 & ~SUFFIX_GROUP_MARKER) - 1]; libsais_prefetchw(&induction_bucket[Ts2]); libsais_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts2, 0)]); } sa_sint_t s3 = SA[i - 1 * prefetch_distance - 1]; if (s3 > 0) { const fast_sint_t Ts3 = T[(s3 & ~SUFFIX_GROUP_MARKER) - 1]; libsais_prefetchw(&induction_bucket[Ts3]); libsais_prefetchw(&distinct_names[BUCKETS_INDEX2(Ts3, 0)]); } sa_sint_t p0 = SA[i - 0]; if (p0 > 0) { SA[i - 0] = 0; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); p0 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); SA[--induction_bucket[T[p0 - 1]]] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] > T[p0 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d; } sa_sint_t p1 = SA[i - 1]; if (p1 > 0) { SA[i - 1] = 0; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); p1 &= ~SUFFIX_GROUP_MARKER; sa_sint_t v1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); SA[--induction_bucket[T[p1 - 1]]] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] > T[p1 - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d; } } for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; if (p > 0) { SA[i] = 0; d += (p >> (SUFFIX_GROUP_BIT - 1)); p &= ~SUFFIX_GROUP_MARKER; sa_sint_t v = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]); SA[--induction_bucket[T[p - 1]]] = (p - 1) | ((sa_sint_t)(T[p - 2] > T[p - 1]) << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d; } } return d; } static void libsais_partial_sorting_scan_right_to_left_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&SA[i - 3 * prefetch_distance]); sa_sint_t s0 = SA[i - 2 * prefetch_distance - 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - 2 * prefetch_distance - 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? 
Ts1 : NULL); sa_sint_t s2 = SA[i - 1 * prefetch_distance - 0]; if (s2 > 0) { libsais_prefetchw(&induction_bucket[T[s2 - 1]]); libsais_prefetch(&T[s2] - 2); } sa_sint_t s3 = SA[i - 1 * prefetch_distance - 1]; if (s3 > 0) { libsais_prefetchw(&induction_bucket[T[s3 - 1]]); libsais_prefetch(&T[s3] - 2); } sa_sint_t p0 = SA[i - 0]; if (p0 > 0) { SA[i - 0] = 0; SA[--induction_bucket[T[p0 - 1]]] = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] > T[p0 - 1]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i - 1]; if (p1 > 0) { SA[i - 1] = 0; SA[--induction_bucket[T[p1 - 1]]] = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] > T[p1 - 1]) << (SAINT_BIT - 1)); } } for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; if (p > 0) { SA[i] = 0; SA[--induction_bucket[T[p - 1]]] = (p - 1) | ((sa_sint_t)(T[p - 2] > T[p - 1]) << (SAINT_BIT - 1)); } } } #if defined(_OPENMP) static void libsais_partial_sorting_scan_right_to_left_32s_6k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetch(&SA[i + 2 * prefetch_distance]); libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + prefetch_distance + 0] & SAINT_MAX] - 2); libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 1); libsais_prefetch(&T[SA[i + prefetch_distance + 1] & SAINT_MAX] - 2); libsais_prefetchw(&cache[i + prefetch_distance]); sa_sint_t p0 = cache[i + 0].index = SA[i + 0]; sa_sint_t symbol0 = 0; p0 &= SAINT_MAX; if (p0 != 0) { symbol0 = BUCKETS_INDEX4(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); } cache[i + 0].symbol = symbol0; sa_sint_t p1 = cache[i + 1].index = SA[i + 1]; sa_sint_t symbol1 = 0; p1 &= SAINT_MAX; if (p1 != 0) { symbol1 = BUCKETS_INDEX4(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); } cache[i + 1].symbol = symbol1; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = cache[i].index = SA[i]; sa_sint_t symbol = 0; p &= SAINT_MAX; if (p != 0) { symbol = BUCKETS_INDEX4(T[p - 1], T[p - 2] > T[p - 1]); } cache[i].symbol = symbol; } } static void libsais_partial_sorting_scan_right_to_left_32s_4k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1 & ~SUFFIX_GROUP_MARKER] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? 
Ts1 : NULL); libsais_prefetchw(&cache[i + prefetch_distance]); sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { SA[i + 0] = 0; cache[i + 0].index = p0; p0 &= ~SUFFIX_GROUP_MARKER; symbol0 = BUCKETS_INDEX2(T[p0 - 1], T[p0 - 2] > T[p0 - 1]); } cache[i + 0].symbol = symbol0; sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { SA[i + 1] = 0; cache[i + 1].index = p1; p1 &= ~SUFFIX_GROUP_MARKER; symbol1 = BUCKETS_INDEX2(T[p1 - 1], T[p1 - 2] > T[p1 - 1]); } cache[i + 1].symbol = symbol1; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { SA[i] = 0; cache[i].index = p; p &= ~SUFFIX_GROUP_MARKER; symbol = BUCKETS_INDEX2(T[p - 1], T[p - 2] > T[p - 1]); } cache[i].symbol = symbol; } } static void libsais_partial_sorting_scan_right_to_left_32s_1k_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL); libsais_prefetchw(&cache[i + prefetch_distance]); sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; if (p0 > 0) { SA[i + 0] = 0; cache[i + 0].index = (p0 - 1) | ((sa_sint_t)(T[p0 - 2] > T[p0 - 1]) << (SAINT_BIT - 1)); symbol0 = T[p0 - 1]; } cache[i + 0].symbol = symbol0; sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; if (p1 > 0) { SA[i + 1] = 0; cache[i + 1].index = (p1 - 1) | ((sa_sint_t)(T[p1 - 2] > T[p1 - 1]) << (SAINT_BIT - 1)); symbol1 = T[p1 - 1]; } cache[i + 1].symbol = symbol1; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t symbol = SAINT_MIN, p = SA[i]; if (p > 0) { SA[i] = 0; cache[i].index = (p - 1) | ((sa_sint_t)(T[p - 2] > T[p - 1]) << (SAINT_BIT - 1)); symbol = T[p - 1]; } cache[i].symbol = symbol; } } static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_6k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&cache[i - 2 * prefetch_distance]); libsais_prefetchw(&buckets[cache[i - prefetch_distance - 0].symbol]); libsais_prefetchw(&buckets[cache[i - prefetch_distance - 1].symbol]); sa_sint_t v0 = cache[i - 0].symbol, p0 = cache[i - 0].index; d += (p0 < 0); cache[i - 0].symbol = --buckets[v0]; cache[i - 0].index = (p0 - 1) | ((sa_sint_t)(buckets[2 + v0] != d) << (SAINT_BIT - 1)); buckets[2 + v0] = d; if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t s = cache[i - 0].symbol, q = (cache[s].index = cache[i - 0].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] > T[q - 1]); } sa_sint_t v1 = cache[i - 1].symbol, p1 = cache[i - 1].index; d += (p1 < 0); cache[i - 1].symbol = --buckets[v1]; cache[i - 1].index = (p1 - 1) | ((sa_sint_t)(buckets[2 + v1] != d) << (SAINT_BIT - 1)); buckets[2 + v1] = d; if (cache[i - 
1].symbol >= omp_block_start) { sa_sint_t s = cache[i - 1].symbol, q = (cache[s].index = cache[i - 1].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] > T[q - 1]); } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t v = cache[i].symbol, p = cache[i].index; d += (p < 0); cache[i].symbol = --buckets[v]; cache[i].index = (p - 1) | ((sa_sint_t)(buckets[2 + v] != d) << (SAINT_BIT - 1)); buckets[2 + v] = d; if (cache[i].symbol >= omp_block_start) { sa_sint_t s = cache[i].symbol, q = (cache[s].index = cache[i].index) & SAINT_MAX; cache[s].symbol = BUCKETS_INDEX4(T[q - 1], T[q - 2] > T[q - 1]); } } return d; } static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_4k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT induction_bucket = &buckets[3 * k]; sa_sint_t * RESTRICT distinct_names = &buckets[0 * k]; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&cache[i - 2 * prefetch_distance]); sa_sint_t s0 = cache[i - prefetch_distance - 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0 >> 1]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL); const sa_sint_t * Ds0 = &distinct_names[s0]; libsais_prefetchw(s0 >= 0 ? Ds0 : NULL); sa_sint_t s1 = cache[i - prefetch_distance - 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1 >> 1]; libsais_prefetchw(s1 >= 0 ? Is1 : NULL); const sa_sint_t * Ds1 = &distinct_names[s1]; libsais_prefetchw(s1 >= 0 ? Ds1 : NULL); sa_sint_t v0 = cache[i - 0].symbol; if (v0 >= 0) { sa_sint_t p0 = cache[i - 0].index; d += (p0 >> (SUFFIX_GROUP_BIT - 1)); cache[i - 0].symbol = --induction_bucket[v0 >> 1]; cache[i - 0].index = (p0 - 1) | (v0 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v0] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v0] = d; if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 0].symbol, np = cache[i - 0].index; if (np > 0) { cache[i - 0].index = 0; cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] > T[np - 1]); } } } sa_sint_t v1 = cache[i - 1].symbol; if (v1 >= 0) { sa_sint_t p1 = cache[i - 1].index; d += (p1 >> (SUFFIX_GROUP_BIT - 1)); cache[i - 1].symbol = --induction_bucket[v1 >> 1]; cache[i - 1].index = (p1 - 1) | (v1 << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v1] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v1] = d; if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 1].symbol, np = cache[i - 1].index; if (np > 0) { cache[i - 1].index = 0; cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] > T[np - 1]); } } } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t v = cache[i].symbol; if (v >= 0) { sa_sint_t p = cache[i].index; d += (p >> (SUFFIX_GROUP_BIT - 1)); cache[i].symbol = --induction_bucket[v >> 1]; cache[i].index = (p - 1) | (v << (SAINT_BIT - 1)) | ((sa_sint_t)(distinct_names[v] != d) << (SUFFIX_GROUP_BIT - 1)); distinct_names[v] = d; if (cache[i].symbol >= omp_block_start) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[i].index = 0; cache[ni].index = np; np &= ~SUFFIX_GROUP_MARKER; cache[ni].symbol = BUCKETS_INDEX2(T[np - 1], T[np - 2] > T[np - 1]); } } } } return d; } static void 
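/* Single-threaded "sort" stage of the cached 1k pipeline: it walks the gathered
   cache from right to left, decrements the induction bucket for each live entry,
   and, whenever the induced position falls inside the same block, rewrites that
   cache slot in place so the following parallel place step can scatter everything
   back into SA. */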
libsais_partial_sorting_scan_right_to_left_32s_1k_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&cache[i - 2 * prefetch_distance]); sa_sint_t s0 = cache[i - prefetch_distance - 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL); sa_sint_t s1 = cache[i - prefetch_distance - 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais_prefetchw(s1 >= 0 ? Is1 : NULL); sa_sint_t v0 = cache[i - 0].symbol; if (v0 >= 0) { cache[i - 0].symbol = --induction_bucket[v0]; if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 0].symbol, np = cache[i - 0].index; if (np > 0) { cache[i - 0].index = 0; cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] > T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; } } } sa_sint_t v1 = cache[i - 1].symbol; if (v1 >= 0) { cache[i - 1].symbol = --induction_bucket[v1]; if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 1].symbol, np = cache[i - 1].index; if (np > 0) { cache[i - 1].index = 0; cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] > T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; }} } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t v = cache[i].symbol; if (v >= 0) { cache[i].symbol = --induction_bucket[v]; if (cache[i].symbol >= omp_block_start) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; if (np > 0) { cache[i].index = 0; cache[ni].index = (np - 1) | ((sa_sint_t)(T[np - 2] > T[np - 1]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np - 1]; } } } } } static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_6k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { d = libsais_partial_sorting_scan_right_to_left_32s_6k(T, SA, buckets, d, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais_partial_sorting_scan_right_to_left_32s_6k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { d = libsais_partial_sorting_scan_right_to_left_32s_6k_block_sort(T, buckets, d, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } return d; } static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_4k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { d = libsais_partial_sorting_scan_right_to_left_32s_4k(T, SA, k, buckets, d, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais_partial_sorting_scan_right_to_left_32s_4k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { d = libsais_partial_sorting_scan_right_to_left_32s_4k_block_sort(T, k, buckets, d, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } return d; } static void libsais_partial_sorting_scan_right_to_left_32s_1k_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_partial_sorting_scan_right_to_left_32s_1k(T, SA, buckets, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais_partial_sorting_scan_right_to_left_32s_1k_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { libsais_partial_sorting_scan_right_to_left_32s_1k_block_sort(T, buckets, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } } #endif static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { fast_sint_t scan_start = (fast_sint_t)left_suffixes_count + 1; fast_sint_t scan_end = (fast_sint_t)n - (fast_sint_t)first_lms_suffix; if (threads == 1 || (scan_end - scan_start) < 65536) { d = libsais_partial_sorting_scan_right_to_left_32s_6k(T, SA, buckets, d, scan_start, scan_end - scan_start); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = scan_end - 1; block_start >= scan_start; block_start = block_end) { block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < scan_start) { block_end = scan_start - 1; } d = libsais_partial_sorting_scan_right_to_left_32s_6k_block_omp(T, SA, buckets, d, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads); } } #else UNUSED(thread_state); #endif return d; } static sa_sint_t libsais_partial_sorting_scan_right_to_left_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t d, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || n < 65536) { d = libsais_partial_sorting_scan_right_to_left_32s_4k(T, SA, k, buckets, d, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = (fast_sint_t)n - 1; block_start >= 0; block_start = block_end) { block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < 0) { block_end = -1; } d = libsais_partial_sorting_scan_right_to_left_32s_4k_block_omp(T, SA, k, buckets, d, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads); } } #else UNUSED(thread_state); #endif return d; } static void libsais_partial_sorting_scan_right_to_left_32s_1k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || n < 65536) { libsais_partial_sorting_scan_right_to_left_32s_1k(T, SA, buckets, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = (fast_sint_t)n - 1; block_start >= 0; block_start = block_end) { block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < 0) { block_end = -1; } libsais_partial_sorting_scan_right_to_left_32s_1k_block_omp(T, SA, buckets, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads); } } #else UNUSED(thread_state); #endif } static fast_sint_t libsais_partial_sorting_gather_lms_suffixes_32s_4k(sa_sint_t * RESTRICT SA, 
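/* Gather step for the 4k path: only entries with the sign bit set (the LMS
   suffixes placed by the preceding induction passes) survive, compacted toward
   the front of the block; the (s - SUFFIX_GROUP_MARKER) & ~SUFFIX_GROUP_MARKER
   arithmetic folds the group-marker flag into the sign bit so the renumbering
   stage can still recognize group boundaries. */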
fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j, l; for (i = omp_block_start, j = omp_block_start + omp_block_size - 3, l = omp_block_start; i < j; i += 4) { libsais_prefetch(&SA[i + prefetch_distance]); sa_sint_t s0 = SA[i + 0]; SA[l] = (s0 - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); l += (s0 < 0); sa_sint_t s1 = SA[i + 1]; SA[l] = (s1 - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); l += (s1 < 0); sa_sint_t s2 = SA[i + 2]; SA[l] = (s2 - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); l += (s2 < 0); sa_sint_t s3 = SA[i + 3]; SA[l] = (s3 - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); l += (s3 < 0); } for (j += 3; i < j; i += 1) { sa_sint_t s = SA[i]; SA[l] = (s - SUFFIX_GROUP_MARKER) & (~SUFFIX_GROUP_MARKER); l += (s < 0); } return l; } static fast_sint_t libsais_partial_sorting_gather_lms_suffixes_32s_1k(sa_sint_t * RESTRICT SA, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j, l; for (i = omp_block_start, j = omp_block_start + omp_block_size - 3, l = omp_block_start; i < j; i += 4) { libsais_prefetch(&SA[i + prefetch_distance]); sa_sint_t s0 = SA[i + 0]; SA[l] = s0 & SAINT_MAX; l += (s0 < 0); sa_sint_t s1 = SA[i + 1]; SA[l] = s1 & SAINT_MAX; l += (s1 < 0); sa_sint_t s2 = SA[i + 2]; SA[l] = s2 & SAINT_MAX; l += (s2 < 0); sa_sint_t s3 = SA[i + 3]; SA[l] = s3 & SAINT_MAX; l += (s3 < 0); } for (j += 3; i < j; i += 1) { sa_sint_t s = SA[i]; SA[l] = s & SAINT_MAX; l += (s < 0); } return l; } static void libsais_partial_sorting_gather_lms_suffixes_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { libsais_partial_sorting_gather_lms_suffixes_32s_4k(SA, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.position = omp_block_start; thread_state[omp_thread_num].state.count = libsais_partial_sorting_gather_lms_suffixes_32s_4k(SA, omp_block_start, omp_block_size) - omp_block_start; } #pragma omp barrier #pragma omp master { fast_sint_t t, position = 0; for (t = 0; t < omp_num_threads; ++t) { if (t > 0 && thread_state[t].state.count > 0) { memmove(&SA[position], &SA[thread_state[t].state.position], (size_t)thread_state[t].state.count * sizeof(sa_sint_t)); } position += thread_state[t].state.count; } } } #endif } } static void libsais_partial_sorting_gather_lms_suffixes_32s_1k_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { libsais_partial_sorting_gather_lms_suffixes_32s_1k(SA, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.position = omp_block_start; thread_state[omp_thread_num].state.count = libsais_partial_sorting_gather_lms_suffixes_32s_1k(SA, omp_block_start, omp_block_size) - omp_block_start; } #pragma omp barrier #pragma omp master { fast_sint_t t, position = 0; for (t = 0; t < omp_num_threads; ++t) { if (t > 0 && thread_state[t].state.count > 0) { memmove(&SA[position], &SA[thread_state[t].state.position], (size_t)thread_state[t].state.count * sizeof(sa_sint_t)); } position += thread_state[t].state.count; } } } #endif } } static void libsais_induce_partial_order_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { memset(&buckets[2 * ALPHABET_SIZE], 0, 2 * ALPHABET_SIZE * sizeof(sa_sint_t)); sa_sint_t d = libsais_partial_sorting_scan_left_to_right_8u_omp(T, SA, n, buckets, left_suffixes_count, 0, threads, thread_state); libsais_partial_sorting_shift_markers_8u_omp(SA, n, buckets, threads); libsais_partial_sorting_scan_right_to_left_8u_omp(T, SA, n, buckets, first_lms_suffix, left_suffixes_count, d, threads, thread_state); } static void libsais_induce_partial_order_32s_6k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t first_lms_suffix, sa_sint_t left_suffixes_count, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t d = libsais_partial_sorting_scan_left_to_right_32s_6k_omp(T, SA, n, buckets, left_suffixes_count, 0, threads, thread_state); libsais_partial_sorting_shift_markers_32s_6k_omp(SA, k, buckets, threads); libsais_partial_sorting_shift_buckets_32s_6k(k, buckets); libsais_partial_sorting_scan_right_to_left_32s_6k_omp(T, SA, n, buckets, first_lms_suffix, left_suffixes_count, d, 
threads, thread_state); } static void libsais_induce_partial_order_32s_4k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { memset(buckets, 0, 2 * (size_t)k * sizeof(sa_sint_t)); sa_sint_t d = libsais_partial_sorting_scan_left_to_right_32s_4k_omp(T, SA, n, k, buckets, 0, threads, thread_state); libsais_partial_sorting_shift_markers_32s_4k(SA, n); libsais_partial_sorting_scan_right_to_left_32s_4k_omp(T, SA, n, k, buckets, d, threads, thread_state); libsais_partial_sorting_gather_lms_suffixes_32s_4k_omp(SA, n, threads, thread_state); } static void libsais_induce_partial_order_32s_2k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais_partial_sorting_scan_left_to_right_32s_1k_omp(T, SA, n, &buckets[1 * k], threads, thread_state); libsais_partial_sorting_scan_right_to_left_32s_1k_omp(T, SA, n, &buckets[0 * k], threads, thread_state); libsais_partial_sorting_gather_lms_suffixes_32s_1k_omp(SA, n, threads, thread_state); } static void libsais_induce_partial_order_32s_1k_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais_count_suffixes_32s(T, n, k, buckets); libsais_initialize_buckets_start_32s_1k(k, buckets); libsais_partial_sorting_scan_left_to_right_32s_1k_omp(T, SA, n, buckets, threads, thread_state); libsais_count_suffixes_32s(T, n, k, buckets); libsais_initialize_buckets_end_32s_1k(k, buckets); libsais_partial_sorting_scan_right_to_left_32s_1k_omp(T, SA, n, buckets, threads, thread_state); libsais_partial_sorting_gather_lms_suffixes_32s_1k_omp(SA, n, threads, thread_state); } static sa_sint_t libsais_renumber_lms_suffixes_8u(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t name, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT SAm = &SA[m]; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais_prefetch(&SA[i + 2 * prefetch_distance]); libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 0] & SAINT_MAX) >> 1]); libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 1] & SAINT_MAX) >> 1]); libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 2] & SAINT_MAX) >> 1]); libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 3] & SAINT_MAX) >> 1]); sa_sint_t p0 = SA[i + 0]; SAm[(p0 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p0 < 0; sa_sint_t p1 = SA[i + 1]; SAm[(p1 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p1 < 0; sa_sint_t p2 = SA[i + 2]; SAm[(p2 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p2 < 0; sa_sint_t p3 = SA[i + 3]; SAm[(p3 & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p3 < 0; } for (j += prefetch_distance + 3; i < j; i += 1) { sa_sint_t p = SA[i]; SAm[(p & SAINT_MAX) >> 1] = name | SAINT_MIN; name += p < 0; } return name; } static fast_sint_t libsais_gather_marked_suffixes_8u(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t l, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; l -= 1; fast_sint_t i, j; for (i = (fast_sint_t)m + omp_block_start + omp_block_size - 1, j = (fast_sint_t)m + omp_block_start + 3; i >= j; i -= 4) { libsais_prefetch(&SA[i - prefetch_distance]); sa_sint_t s0 = SA[i - 
0]; SA[l] = s0 & SAINT_MAX; l -= s0 < 0; sa_sint_t s1 = SA[i - 1]; SA[l] = s1 & SAINT_MAX; l -= s1 < 0; sa_sint_t s2 = SA[i - 2]; SA[l] = s2 & SAINT_MAX; l -= s2 < 0; sa_sint_t s3 = SA[i - 3]; SA[l] = s3 & SAINT_MAX; l -= s3 < 0; } for (j -= 3; i >= j; i -= 1) { sa_sint_t s = SA[i]; SA[l] = s & SAINT_MAX; l -= s < 0; } l += 1; return l; } static sa_sint_t libsais_renumber_lms_suffixes_8u_omp(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t name = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start; if (omp_num_threads == 1) { name = libsais_renumber_lms_suffixes_8u(SA, m, 0, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais_count_negative_marked_suffixes(SA, omp_block_start, omp_block_size); } #pragma omp barrier { fast_sint_t t, count = 0; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; } if (omp_thread_num == omp_num_threads - 1) { name = (sa_sint_t)(count + thread_state[omp_thread_num].state.count); } libsais_renumber_lms_suffixes_8u(SA, m, (sa_sint_t)count, omp_block_start, omp_block_size); } } #endif } return name; } static void libsais_gather_marked_lms_suffixes_8u_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start; if (omp_num_threads == 1) { libsais_gather_marked_suffixes_8u(SA, m, (fast_sint_t)n + (fast_sint_t)fs, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { if (omp_thread_num < omp_num_threads - 1) { thread_state[omp_thread_num].state.position = libsais_gather_marked_suffixes_8u(SA, m, (fast_sint_t)m + omp_block_start + omp_block_size, omp_block_start, omp_block_size); thread_state[omp_thread_num].state.count = (fast_sint_t)m + omp_block_start + omp_block_size - thread_state[omp_thread_num].state.position; } else { thread_state[omp_thread_num].state.position = libsais_gather_marked_suffixes_8u(SA, m, (fast_sint_t)n + (fast_sint_t)fs, omp_block_start, omp_block_size); thread_state[omp_thread_num].state.count = (fast_sint_t)n + (fast_sint_t)fs - thread_state[omp_thread_num].state.position; } } #pragma omp barrier #pragma omp master { fast_sint_t t, position = (fast_sint_t)n + (fast_sint_t)fs; for (t = omp_num_threads - 1; t >= 0; --t) { position -= thread_state[t].state.count; if (t != omp_num_threads - 1 && thread_state[t].state.count > 0) { memmove(&SA[position], &SA[thread_state[t].state.position], (size_t)thread_state[t].state.count * sizeof(sa_sint_t)); } } } } #endif } } static sa_sint_t libsais_renumber_and_gather_lms_suffixes_8u_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { memset(&SA[m], 0, ((size_t)n >> 1) * sizeof(sa_sint_t)); sa_sint_t name = libsais_renumber_lms_suffixes_8u_omp(SA, m, threads, thread_state); if (name < m) { libsais_gather_marked_lms_suffixes_8u_omp(SA, n, m, fs, threads, thread_state); } else { fast_sint_t i; for (i = 0; i < m; i += 1) { SA[i] &= SAINT_MAX; } } return name; } static sa_sint_t libsais_renumber_distinct_lms_suffixes_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t name, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT SAm = &SA[m]; fast_sint_t i, j; sa_sint_t p0, p1, p2, p3 = 0; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais_prefetchw(&SA[i + 2 * prefetch_distance]); libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 0] & SAINT_MAX) >> 1]); libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 1] & SAINT_MAX) >> 1]); libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 2] & SAINT_MAX) >> 1]); libsais_prefetchw(&SAm[(SA[i + prefetch_distance + 3] & SAINT_MAX) >> 1]); p0 = SA[i + 0]; SAm[(SA[i + 0] = p0 & SAINT_MAX) >> 1] = name | (p0 & p3 & SAINT_MIN); name += p0 < 0; p1 = SA[i + 1]; SAm[(SA[i + 1] = p1 & SAINT_MAX) >> 1] = name | (p1 & p0 & SAINT_MIN); name += p1 < 0; p2 = SA[i + 2]; SAm[(SA[i + 2] = p2 & SAINT_MAX) >> 1] = name | (p2 & p1 & SAINT_MIN); name += p2 < 0; p3 = SA[i + 3]; SAm[(SA[i + 3] = p3 & SAINT_MAX) >> 1] = name | (p3 & p2 & SAINT_MIN); name += p3 < 0; } for (j += prefetch_distance + 3; i < j; i += 1) { p2 = p3; p3 = SA[i]; SAm[(SA[i] = p3 & SAINT_MAX) >> 1] = name | (p3 & p2 & SAINT_MIN); name += p3 < 0; } return name; } static void libsais_mark_distinct_lms_suffixes_32s(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; sa_sint_t p0, p1, p2, p3 = 0; for (i = (fast_sint_t)m + omp_block_start, j = (fast_sint_t)m + omp_block_start + omp_block_size - 3; i < j; i += 4) { libsais_prefetchw(&SA[i + prefetch_distance]); p0 = SA[i + 
0]; SA[i + 0] = p0 & (p3 | SAINT_MAX); p0 = (p0 == 0) ? p3 : p0; p1 = SA[i + 1]; SA[i + 1] = p1 & (p0 | SAINT_MAX); p1 = (p1 == 0) ? p0 : p1; p2 = SA[i + 2]; SA[i + 2] = p2 & (p1 | SAINT_MAX); p2 = (p2 == 0) ? p1 : p2; p3 = SA[i + 3]; SA[i + 3] = p3 & (p2 | SAINT_MAX); p3 = (p3 == 0) ? p2 : p3; } for (j += 3; i < j; i += 1) { p2 = p3; p3 = SA[i]; SA[i] = p3 & (p2 | SAINT_MAX); p3 = (p3 == 0) ? p2 : p3; } } static void libsais_clamp_lms_suffixes_length_32s(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT SAm = &SA[m]; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - 3; i < j; i += 4) { libsais_prefetchw(&SAm[i + prefetch_distance]); SAm[i + 0] = (SAm[i + 0] < 0 ? SAm[i + 0] : 0) & SAINT_MAX; SAm[i + 1] = (SAm[i + 1] < 0 ? SAm[i + 1] : 0) & SAINT_MAX; SAm[i + 2] = (SAm[i + 2] < 0 ? SAm[i + 2] : 0) & SAINT_MAX; SAm[i + 3] = (SAm[i + 3] < 0 ? SAm[i + 3] : 0) & SAINT_MAX; } for (j += 3; i < j; i += 1) { SAm[i] = (SAm[i] < 0 ? SAm[i] : 0) & SAINT_MAX; } } static sa_sint_t libsais_renumber_distinct_lms_suffixes_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t name = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start; if (omp_num_threads == 1) { name = libsais_renumber_distinct_lms_suffixes_32s_4k(SA, m, 1, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais_count_negative_marked_suffixes(SA, omp_block_start, omp_block_size); } #pragma omp barrier { fast_sint_t t, count = 1; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; } if (omp_thread_num == omp_num_threads - 1) { name = (sa_sint_t)(count + thread_state[omp_thread_num].state.count); } libsais_renumber_distinct_lms_suffixes_32s_4k(SA, m, (sa_sint_t)count, omp_block_start, omp_block_size); } } #endif } return name - 1; } static void libsais_mark_distinct_lms_suffixes_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start; #else UNUSED(threads); fast_sint_t omp_block_start = 0; fast_sint_t omp_block_size = (fast_sint_t)n >> 1; #endif libsais_mark_distinct_lms_suffixes_32s(SA, m, omp_block_start, omp_block_size); } } static void libsais_clamp_lms_suffixes_length_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start; #else UNUSED(threads); fast_sint_t omp_block_start = 0; fast_sint_t omp_block_size = (fast_sint_t)n >> 1; #endif libsais_clamp_lms_suffixes_length_32s(SA, m, omp_block_start, omp_block_size); } } static sa_sint_t libsais_renumber_and_mark_distinct_lms_suffixes_32s_4k_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { memset(&SA[m], 0, ((size_t)n >> 1) * sizeof(sa_sint_t)); sa_sint_t name = libsais_renumber_distinct_lms_suffixes_32s_4k_omp(SA, m, threads, thread_state); if (name < m) { libsais_mark_distinct_lms_suffixes_32s_omp(SA, n, m, threads); } return name; } static sa_sint_t libsais_renumber_and_mark_distinct_lms_suffixes_32s_1k_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT SAm = &SA[m]; { libsais_gather_lms_suffixes_32s(T, SA, n); memset(&SA[m], 0, ((size_t)n - (size_t)m - (size_t)m) * sizeof(sa_sint_t)); fast_sint_t i, j; for (i = (fast_sint_t)n - (fast_sint_t)m, j = (fast_sint_t)n - 1 - prefetch_distance - 3; i < j; i += 4) { libsais_prefetch(&SA[i + 2 * prefetch_distance]); libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 0]) >> 1]); libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 1]) >> 1]); libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 2]) >> 1]); libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 3]) >> 1]); SAm[((sa_uint_t)SA[i + 0]) >> 1] = SA[i + 1] - SA[i + 0] + 1 + SAINT_MIN; SAm[((sa_uint_t)SA[i + 1]) >> 1] = SA[i + 2] - SA[i + 1] + 1 + SAINT_MIN; SAm[((sa_uint_t)SA[i + 2]) >> 1] = SA[i + 3] - SA[i + 2] + 1 + SAINT_MIN; SAm[((sa_uint_t)SA[i + 3]) >> 1] = SA[i + 4] - SA[i + 3] + 1 + SAINT_MIN; } for (j += prefetch_distance + 3; i < j; i += 1) { SAm[((sa_uint_t)SA[i]) >> 1] = SA[i + 1] - SA[i] + 1 + SAINT_MIN; } SAm[((sa_uint_t)SA[n - 1]) >> 1] = 1 + SAINT_MIN; } { libsais_clamp_lms_suffixes_length_32s_omp(SA, n, m, threads); } sa_sint_t name = 1; { fast_sint_t i, j, p = SA[0], plen = SAm[p >> 1]; sa_sint_t pdiff = SAINT_MIN; for (i = 1, j = m - prefetch_distance - 1; i < j; i += 2) { libsais_prefetch(&SA[i + 2 * prefetch_distance]); libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 0]) >> 1]); libsais_prefetch(&T[((sa_uint_t)SA[i + prefetch_distance + 0])]); libsais_prefetchw(&SAm[((sa_uint_t)SA[i + prefetch_distance + 1]) >> 1]); libsais_prefetch(&T[((sa_uint_t)SA[i + prefetch_distance + 1])]); fast_sint_t q = SA[i + 0], qlen = SAm[q >> 1]; sa_sint_t qdiff = SAINT_MIN; if (plen == qlen) { fast_sint_t l = 0; do { if (T[p + l] != T[q + l]) { break; } } 
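/* Naming of LMS substrings: each substring is compared with its predecessor,
   first by the length stored in SAm and then byte by byte up to that length;
   qdiff stays negative (forcing a fresh name) unless the two substrings are
   identical. */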
while (++l < qlen); qdiff = (sa_sint_t)(l - qlen) & SAINT_MIN; } SAm[p >> 1] = name | (pdiff & qdiff); name += (qdiff < 0); p = SA[i + 1]; plen = SAm[p >> 1]; pdiff = SAINT_MIN; if (qlen == plen) { fast_sint_t l = 0; do { if (T[q + l] != T[p + l]) { break; } } while (++l < plen); pdiff = (sa_sint_t)(l - plen) & SAINT_MIN; } SAm[q >> 1] = name | (qdiff & pdiff); name += (pdiff < 0); } for (j += prefetch_distance + 1; i < j; i += 1) { fast_sint_t q = SA[i], qlen = SAm[q >> 1]; sa_sint_t qdiff = SAINT_MIN; if (plen == qlen) { fast_sint_t l = 0; do { if (T[p + l] != T[q + l]) { break; } } while (++l < plen); qdiff = (sa_sint_t)(l - plen) & SAINT_MIN; } SAm[p >> 1] = name | (pdiff & qdiff); name += (qdiff < 0); p = q; plen = qlen; pdiff = qdiff; } SAm[p >> 1] = name | pdiff; name++; } if (name <= m) { libsais_mark_distinct_lms_suffixes_32s_omp(SA, n, m, threads); } return name - 1; } static void libsais_reconstruct_lms_suffixes(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; const sa_sint_t * RESTRICT SAnm = &SA[n - m]; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais_prefetchw(&SA[i + 2 * prefetch_distance]); libsais_prefetch(&SAnm[SA[i + prefetch_distance + 0]]); libsais_prefetch(&SAnm[SA[i + prefetch_distance + 1]]); libsais_prefetch(&SAnm[SA[i + prefetch_distance + 2]]); libsais_prefetch(&SAnm[SA[i + prefetch_distance + 3]]); SA[i + 0] = SAnm[SA[i + 0]]; SA[i + 1] = SAnm[SA[i + 1]]; SA[i + 2] = SAnm[SA[i + 2]]; SA[i + 3] = SAnm[SA[i + 3]]; } for (j += prefetch_distance + 3; i < j; i += 1) { SA[i] = SAnm[SA[i]]; } } static void libsais_reconstruct_lms_suffixes_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : m - omp_block_start; #else UNUSED(threads); fast_sint_t omp_block_start = 0; fast_sint_t omp_block_size = m; #endif libsais_reconstruct_lms_suffixes(SA, n, m, omp_block_start, omp_block_size); } } static void libsais_place_lms_suffixes_interval_8u(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, const sa_sint_t * RESTRICT buckets) { const sa_sint_t * RESTRICT bucket_end = &buckets[7 * ALPHABET_SIZE]; fast_sint_t c, j = n; for (c = ALPHABET_SIZE - 2; c >= 0; --c) { fast_sint_t l = (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1) + BUCKETS_INDEX2(1, 0)] - (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1)]; if (l > 0) { fast_sint_t i = bucket_end[c]; if (j - i > 0) { memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t)); } memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t)); } } memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t)); } static void libsais_place_lms_suffixes_interval_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets) { const sa_sint_t * RESTRICT bucket_end = &buckets[3 * k]; fast_sint_t c, j = n; for (c = (fast_sint_t)k - 2; c >= 0; --c) { fast_sint_t l = (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1) + BUCKETS_INDEX2(1, 0)] - (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1)]; if (l > 0) { fast_sint_t i = bucket_end[c]; if (j - i > 0) { memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t)); } memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t)); } } memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t)); } static void libsais_place_lms_suffixes_interval_32s_2k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets) { fast_sint_t j = n; if (k > 1) { fast_sint_t c; for (c = BUCKETS_INDEX2((fast_sint_t)k - 2, 0); c >= BUCKETS_INDEX2(0, 0); c -= BUCKETS_INDEX2(1, 0)) { fast_sint_t l = (fast_sint_t)buckets[c + BUCKETS_INDEX2(1, 1)] - (fast_sint_t)buckets[c + BUCKETS_INDEX2(0, 1)]; if (l > 0) { fast_sint_t i = buckets[c]; if (j - i > 0) { memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t)); } memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t)); } } } memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t)); } static void libsais_place_lms_suffixes_interval_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t k, sa_sint_t m, sa_sint_t * RESTRICT buckets) { const fast_sint_t prefetch_distance = 32; sa_sint_t c = k - 1; fast_sint_t i, l = buckets[c]; for (i = (fast_sint_t)m - 1; i >= prefetch_distance + 3; i -= 4) { libsais_prefetch(&SA[i - 2 * prefetch_distance]); libsais_prefetch(&T[SA[i - prefetch_distance - 0]]); libsais_prefetch(&T[SA[i - prefetch_distance - 1]]); libsais_prefetch(&T[SA[i - prefetch_distance - 2]]); libsais_prefetch(&T[SA[i - prefetch_distance - 3]]); sa_sint_t p0 = SA[i - 0]; if (T[p0] != c) { c = T[p0]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p0; sa_sint_t p1 = SA[i - 1]; if (T[p1] != c) { c = T[p1]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p1; sa_sint_t p2 = SA[i - 2]; if (T[p2] != c) { c = T[p2]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p2; sa_sint_t p3 = SA[i - 3]; if (T[p3] != c) { c = T[p3]; memset(&SA[buckets[c]], 0, (size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p3; } for (; i >= 0; i -= 1) { sa_sint_t p = SA[i]; if (T[p] != c) { c = T[p]; memset(&SA[buckets[c]], 0, 
(size_t)(l - buckets[c]) * sizeof(sa_sint_t)); l = buckets[c]; } SA[--l] = p; } memset(&SA[0], 0, (size_t)l * sizeof(sa_sint_t)); } static void libsais_place_lms_suffixes_histogram_32s_6k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets) { const sa_sint_t * RESTRICT bucket_end = &buckets[5 * k]; fast_sint_t c, j = n; for (c = (fast_sint_t)k - 2; c >= 0; --c) { fast_sint_t l = (fast_sint_t)buckets[BUCKETS_INDEX4(c, 1)]; if (l > 0) { fast_sint_t i = bucket_end[c]; if (j - i > 0) { memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t)); } memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t)); } } memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t)); } static void libsais_place_lms_suffixes_histogram_32s_4k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets) { const sa_sint_t * RESTRICT bucket_end = &buckets[3 * k]; fast_sint_t c, j = n; for (c = (fast_sint_t)k - 2; c >= 0; --c) { fast_sint_t l = (fast_sint_t)buckets[BUCKETS_INDEX2(c, 1)]; if (l > 0) { fast_sint_t i = bucket_end[c]; if (j - i > 0) { memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t)); } memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t)); } } memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t)); } static void libsais_place_lms_suffixes_histogram_32s_2k(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, const sa_sint_t * RESTRICT buckets) { fast_sint_t j = n; if (k > 1) { fast_sint_t c; for (c = BUCKETS_INDEX2((fast_sint_t)k - 2, 0); c >= BUCKETS_INDEX2(0, 0); c -= BUCKETS_INDEX2(1, 0)) { fast_sint_t l = (fast_sint_t)buckets[c + BUCKETS_INDEX2(0, 1)]; if (l > 0) { fast_sint_t i = buckets[c]; if (j - i > 0) { memset(&SA[i], 0, (size_t)(j - i) * sizeof(sa_sint_t)); } memmove(&SA[j = (i - l)], &SA[m -= (sa_sint_t)l], (size_t)l * sizeof(sa_sint_t)); } } } memset(&SA[0], 0, (size_t)j * sizeof(sa_sint_t)); } static void libsais_final_bwt_scan_left_to_right_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? 
Ts1 : NULL); sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[i + 0] = T[p0] | SAINT_MIN; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[i + 1] = T[p1] | SAINT_MIN; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[i] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); } } } static void libsais_final_bwt_aux_scan_left_to_right_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[i + 0] = T[p0] | SAINT_MIN; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); if ((p0 & rm) == 0) { I[p0 / (rm + 1)] = induction_bucket[T[p0]]; }} sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[i + 1] = T[p1] | SAINT_MIN; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); if ((p1 & rm) == 0) { I[p1 / (rm + 1)] = induction_bucket[T[p1]]; }} } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[i] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]]; } } } } static void libsais_final_sorting_scan_left_to_right_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? 
Ts1 : NULL); sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); } } } static void libsais_final_sorting_scan_left_to_right_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - 2 * prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 3 * prefetch_distance]); sa_sint_t s0 = SA[i + 2 * prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + 2 * prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t s2 = SA[i + 1 * prefetch_distance + 0]; if (s2 > 0) { libsais_prefetchw(&induction_bucket[T[s2 - 1]]); libsais_prefetch(&T[s2] - 2); } sa_sint_t s3 = SA[i + 1 * prefetch_distance + 1]; if (s3 > 0) { libsais_prefetchw(&induction_bucket[T[s3 - 1]]); libsais_prefetch(&T[s3] - 2); } sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; SA[induction_bucket[T[p0]]++] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; SA[induction_bucket[T[p1]]++] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); } } for (j += 2 * prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); } } } #if defined(_OPENMP) static fast_sint_t libsais_final_bwt_scan_left_to_right_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? 
Ts1 : NULL); sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[i + 0] = T[p0] | SAINT_MIN; buckets[cache[count].symbol = T[p0]]++; cache[count++].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[i + 1] = T[p1] | SAINT_MIN; buckets[cache[count].symbol = T[p1]]++; cache[count++].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[i] = T[p] | SAINT_MIN; buckets[cache[count].symbol = T[p]]++; cache[count++].index = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); } } return count; } static fast_sint_t libsais_final_sorting_scan_left_to_right_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; buckets[cache[count].symbol = T[p0]]++; cache[count++].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; buckets[cache[count].symbol = T[p1]]++; cache[count++].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; buckets[cache[count].symbol = T[p]]++; cache[count++].index = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); } } return count; } static void libsais_final_order_scan_left_to_right_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = 0, j = count - 3; i < j; i += 4) { libsais_prefetch(&cache[i + prefetch_distance]); SA[buckets[cache[i + 0].symbol]++] = cache[i + 0].index; SA[buckets[cache[i + 1].symbol]++] = cache[i + 1].index; SA[buckets[cache[i + 2].symbol]++] = cache[i + 2].index; SA[buckets[cache[i + 3].symbol]++] = cache[i + 3].index; } for (j += 3; i < j; i += 1) { SA[buckets[cache[i].symbol]++] = cache[i].index; } } static void libsais_final_bwt_aux_scan_left_to_right_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = 0, j = count - 3; i < j; i += 4) { libsais_prefetch(&cache[i + prefetch_distance]); SA[buckets[cache[i + 0].symbol]++] = cache[i + 0].index; if ((cache[i + 0].index & rm) == 0) { I[(cache[i + 0].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 0].symbol]; } SA[buckets[cache[i + 1].symbol]++] = cache[i + 1].index; 
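/* Descriptive note added here (inferred from the surrounding code, not part of the original source):
   as for the previous cached entry, when the placed text position is aligned to the auxiliary-index
   sampling rate — rm appears to be the mask r - 1, so (index & rm) == 0 tests divisibility by r —
   the bucket cursor (already advanced past the slot just written) is recorded in I. */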
if ((cache[i + 1].index & rm) == 0) { I[(cache[i + 1].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 1].symbol]; } SA[buckets[cache[i + 2].symbol]++] = cache[i + 2].index; if ((cache[i + 2].index & rm) == 0) { I[(cache[i + 2].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 2].symbol]; } SA[buckets[cache[i + 3].symbol]++] = cache[i + 3].index; if ((cache[i + 3].index & rm) == 0) { I[(cache[i + 3].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i + 3].symbol]; } } for (j += 3; i < j; i += 1) { SA[buckets[cache[i].symbol]++] = cache[i].index; if ((cache[i].index & rm) == 0) { I[(cache[i].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i].symbol]; } } } static void libsais_final_sorting_scan_left_to_right_32s_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL); libsais_prefetchw(&cache[i + prefetch_distance]); sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; SA[i + 0] = p0 ^ SAINT_MIN; if (p0 > 0) { p0--; cache[i + 0].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] < T[p0]) << (SAINT_BIT - 1)); symbol0 = T[p0]; } cache[i + 0].symbol = symbol0; sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; SA[i + 1] = p1 ^ SAINT_MIN; if (p1 > 0) { p1--; cache[i + 1].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] < T[p1]) << (SAINT_BIT - 1)); symbol1 = T[p1]; } cache[i + 1].symbol = symbol1; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t symbol = SAINT_MIN, p = SA[i]; SA[i] = p ^ SAINT_MIN; if (p > 0) { p--; cache[i].index = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); symbol = T[p]; } cache[i].symbol = symbol; } } static void libsais_final_sorting_scan_left_to_right_32s_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j, omp_block_end = omp_block_start + omp_block_size; for (i = omp_block_start, j = omp_block_end - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&cache[i + 2 * prefetch_distance]); sa_sint_t s0 = cache[i + prefetch_distance + 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL); sa_sint_t s1 = cache[i + prefetch_distance + 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais_prefetchw(s1 >= 0 ? 
Is1 : NULL); sa_sint_t v0 = cache[i + 0].symbol; if (v0 >= 0) { cache[i + 0].symbol = induction_bucket[v0]++; if (cache[i + 0].symbol < omp_block_end) { sa_sint_t ni = cache[i + 0].symbol, np = cache[i + 0].index; cache[i + 0].index = np ^ SAINT_MIN; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] < T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } sa_sint_t v1 = cache[i + 1].symbol; if (v1 >= 0) { cache[i + 1].symbol = induction_bucket[v1]++; if (cache[i + 1].symbol < omp_block_end) { sa_sint_t ni = cache[i + 1].symbol, np = cache[i + 1].index; cache[i + 1].index = np ^ SAINT_MIN; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] < T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t v = cache[i].symbol; if (v >= 0) { cache[i].symbol = induction_bucket[v]++; if (cache[i].symbol < omp_block_end) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; cache[i].index = np ^ SAINT_MIN; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] < T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } } } static void libsais_final_bwt_scan_left_to_right_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_final_bwt_scan_left_to_right_8u(T, SA, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais_final_bwt_scan_left_to_right_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { fast_sint_t t; for (t = 0; t < omp_num_threads; ++t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A + B; temp_bucket[c] = A; } } } #pragma omp barrier { libsais_final_order_scan_left_to_right_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } } #endif } } static void libsais_final_bwt_aux_scan_left_to_right_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_final_bwt_aux_scan_left_to_right_8u(T, SA, rm, I, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais_final_bwt_scan_left_to_right_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { fast_sint_t t; for (t = 0; t < omp_num_threads; ++t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A + B; temp_bucket[c] = A; } } } #pragma omp barrier { libsais_final_bwt_aux_scan_left_to_right_8u_block_place(SA, rm, I, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } } #endif } } static void libsais_final_sorting_scan_left_to_right_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_final_sorting_scan_left_to_right_8u(T, SA, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais_final_sorting_scan_left_to_right_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { fast_sint_t t; for (t = 0; t < omp_num_threads; ++t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A + B; temp_bucket[c] = A; } } } #pragma omp barrier { libsais_final_order_scan_left_to_right_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } } #endif } } static void libsais_final_sorting_scan_left_to_right_32s_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_final_sorting_scan_left_to_right_32s(T, SA, buckets, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais_final_sorting_scan_left_to_right_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { libsais_final_sorting_scan_left_to_right_32s_block_sort(T, buckets, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } } #endif static void libsais_final_bwt_scan_left_to_right_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, fast_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[induction_bucket[T[(sa_sint_t)n - 1]]++] = ((sa_sint_t)n - 1) | ((sa_sint_t)(T[(sa_sint_t)n - 2] < T[(sa_sint_t)n - 1]) << (SAINT_BIT - 1)); if (threads == 1 || n < 65536) { libsais_final_bwt_scan_left_to_right_8u(T, SA, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = 0; block_start < n; ) { if (SA[block_start] == 0) { block_start++; } else { fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > n) { block_max_end = n;} fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; } fast_sint_t block_size = block_end - block_start; if (block_size < 32) { for (; block_start < block_end; block_start += 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; SA[block_start] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); } } } else { libsais_final_bwt_scan_left_to_right_8u_block_omp(T, SA, induction_bucket, block_start, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif } static void libsais_final_bwt_aux_scan_left_to_right_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, fast_sint_t n, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[induction_bucket[T[(sa_sint_t)n - 1]]++] = ((sa_sint_t)n - 1) | ((sa_sint_t)(T[(sa_sint_t)n - 2] < T[(sa_sint_t)n - 1]) << (SAINT_BIT - 1)); if ((((sa_sint_t)n - 1) & rm) == 0) { I[((sa_sint_t)n - 1) / (rm + 1)] = induction_bucket[T[(sa_sint_t)n - 1]]; } if (threads == 1 || n < 65536) { libsais_final_bwt_aux_scan_left_to_right_8u(T, SA, rm, I, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = 0; block_start < n; ) { if (SA[block_start] == 0) { block_start++; } else { fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > n) { block_max_end = n;} fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; } fast_sint_t block_size = block_end - block_start; if (block_size < 32) { for (; block_start < block_end; block_start += 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; SA[block_start] = T[p] | SAINT_MIN; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); if ((p & rm) == 
0) { I[p / (rm + 1)] = induction_bucket[T[p]]; } } } } else { libsais_final_bwt_aux_scan_left_to_right_8u_block_omp(T, SA, rm, I, induction_bucket, block_start, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif } static void libsais_final_sorting_scan_left_to_right_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, fast_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[induction_bucket[T[(sa_sint_t)n - 1]]++] = ((sa_sint_t)n - 1) | ((sa_sint_t)(T[(sa_sint_t)n - 2] < T[(sa_sint_t)n - 1]) << (SAINT_BIT - 1)); if (threads == 1 || n < 65536) { libsais_final_sorting_scan_left_to_right_8u(T, SA, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = 0; block_start < n; ) { if (SA[block_start] == 0) { block_start++; } else { fast_sint_t block_max_end = block_start + ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end > n) { block_max_end = n;} fast_sint_t block_end = block_start + 1; while (block_end < block_max_end && SA[block_end] != 0) { block_end++; } fast_sint_t block_size = block_end - block_start; if (block_size < 32) { for (; block_start < block_end; block_start += 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p ^ SAINT_MIN; if (p > 0) { p--; SA[induction_bucket[T[p]]++] = p | ((sa_sint_t)(T[p - (p > 0)] < T[p]) << (SAINT_BIT - 1)); } } } else { libsais_final_sorting_scan_left_to_right_8u_block_omp(T, SA, induction_bucket, block_start, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif } static void libsais_final_sorting_scan_left_to_right_32s_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { SA[induction_bucket[T[n - 1]]++] = (n - 1) | ((sa_sint_t)(T[n - 2] < T[n - 1]) << (SAINT_BIT - 1)); if (threads == 1 || n < 65536) { libsais_final_sorting_scan_left_to_right_32s(T, SA, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = 0; block_start < n; block_start = block_end) { block_end = block_start + (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end > n) { block_end = n; } libsais_final_sorting_scan_left_to_right_32s_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, block_start, block_end - block_start, threads); } } #else UNUSED(thread_state); #endif } static sa_sint_t libsais_final_bwt_scan_right_to_left_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; sa_sint_t index = -1; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; index = (p0 == 0) ? 
(sa_sint_t)(i - 0) : index; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint8_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p0 : t; } sa_sint_t p1 = SA[i - 1]; index = (p1 == 0) ? (sa_sint_t)(i - 1) : index; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint8_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p1 : t; } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; index = (p == 0) ? (sa_sint_t)i : index; SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p : t; } } return index; } static void libsais_final_bwt_aux_scan_right_to_left_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint8_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p0 : t; if ((p0 & rm) == 0) { I[p0 / (rm + 1)] = induction_bucket[T[p0]] + 1; } } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint8_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p1 : t; if ((p1 & rm) == 0) { I[p1 / (rm + 1)] = induction_bucket[T[p1]] + 1; } } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p : t; if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]] + 1; } } } } static void libsais_final_sorting_scan_right_to_left_8u(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? 
Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[--induction_bucket[T[p0]]] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[--induction_bucket[T[p1]]] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[--induction_bucket[T[p]]] = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); } } } static void libsais_final_sorting_scan_right_to_left_32s(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + 2 * prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&SA[i - 3 * prefetch_distance]); sa_sint_t s0 = SA[i - 2 * prefetch_distance - 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - 2 * prefetch_distance - 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t s2 = SA[i - 1 * prefetch_distance - 0]; if (s2 > 0) { libsais_prefetchw(&induction_bucket[T[s2 - 1]]); libsais_prefetch(&T[s2] - 2); } sa_sint_t s3 = SA[i - 1 * prefetch_distance - 1]; if (s3 > 0) { libsais_prefetchw(&induction_bucket[T[s3 - 1]]); libsais_prefetch(&T[s3] - 2); } sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; SA[--induction_bucket[T[p0]]] = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; SA[--induction_bucket[T[p1]]] = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); } } for (j -= 2 * prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; SA[--induction_bucket[T[p]]] = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); } } } #if defined(_OPENMP) static fast_sint_t libsais_final_bwt_scan_right_to_left_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint8_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count++].index = (c0 <= c1) ? p0 : t; } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint8_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count++].index = (c0 <= c1) ? 
p1 : t; } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count++].index = (c0 <= c1) ? p : t; } } return count; } static fast_sint_t libsais_final_bwt_aux_scan_right_to_left_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; uint8_t c0 = T[p0 - (p0 > 0)], c1 = T[p0]; SA[i - 0] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count].index = (c0 <= c1) ? p0 : t; cache[count + 1].index = p0; count += 2; } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; uint8_t c0 = T[p1 - (p1 > 0)], c1 = T[p1]; SA[i - 1] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count].index = (c0 <= c1) ? p1 : t; cache[count + 1].index = p1; count += 2; } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[i] = c1; sa_sint_t t = c0 | SAINT_MIN; buckets[cache[count].symbol = c1]++; cache[count].index = (c0 <= c1) ? p : t; cache[count + 1].index = p; count += 2; } } return count; } static fast_sint_t libsais_final_sorting_scan_right_to_left_8u_block_prepare(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; memset(buckets, 0, ALPHABET_SIZE * sizeof(sa_sint_t)); fast_sint_t i, j, count = 0; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&SA[i - 2 * prefetch_distance]); sa_sint_t s0 = SA[i - prefetch_distance - 0]; const uint8_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i - prefetch_distance - 1]; const uint8_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? 
Ts1 : NULL); sa_sint_t p0 = SA[i - 0]; SA[i - 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; buckets[cache[count].symbol = T[p0]]++; cache[count++].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); } sa_sint_t p1 = SA[i - 1]; SA[i - 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; buckets[cache[count].symbol = T[p1]]++; cache[count++].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; buckets[cache[count].symbol = T[p]]++; cache[count++].index = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); } } return count; } static void libsais_final_order_scan_right_to_left_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = 0, j = count - 3; i < j; i += 4) { libsais_prefetch(&cache[i + prefetch_distance]); SA[--buckets[cache[i + 0].symbol]] = cache[i + 0].index; SA[--buckets[cache[i + 1].symbol]] = cache[i + 1].index; SA[--buckets[cache[i + 2].symbol]] = cache[i + 2].index; SA[--buckets[cache[i + 3].symbol]] = cache[i + 3].index; } for (j += 3; i < j; i += 1) { SA[--buckets[cache[i].symbol]] = cache[i].index; } } static void libsais_final_bwt_aux_scan_right_to_left_8u_block_place(sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t count) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = 0, j = count - 6; i < j; i += 8) { libsais_prefetch(&cache[i + prefetch_distance]); SA[--buckets[cache[i + 0].symbol]] = cache[i + 0].index; if ((cache[i + 1].index & rm) == 0) { I[cache[i + 1].index / (rm + 1)] = buckets[cache[i + 0].symbol] + 1; } SA[--buckets[cache[i + 2].symbol]] = cache[i + 2].index; if ((cache[i + 3].index & rm) == 0) { I[cache[i + 3].index / (rm + 1)] = buckets[cache[i + 2].symbol] + 1; } SA[--buckets[cache[i + 4].symbol]] = cache[i + 4].index; if ((cache[i + 5].index & rm) == 0) { I[cache[i + 5].index / (rm + 1)] = buckets[cache[i + 4].symbol] + 1; } SA[--buckets[cache[i + 6].symbol]] = cache[i + 6].index; if ((cache[i + 7].index & rm) == 0) { I[cache[i + 7].index / (rm + 1)] = buckets[cache[i + 6].symbol] + 1; } } for (j += 6; i < j; i += 2) { SA[--buckets[cache[i].symbol]] = cache[i].index; if ((cache[i + 1].index & rm) == 0) { I[(cache[i + 1].index & SAINT_MAX) / (rm + 1)] = buckets[cache[i].symbol] + 1; } } } static void libsais_final_sorting_scan_right_to_left_32s_block_gather(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 1; i < j; i += 2) { libsais_prefetchw(&SA[i + 2 * prefetch_distance]); sa_sint_t s0 = SA[i + prefetch_distance + 0]; const sa_sint_t * Ts0 = &T[s0] - 1; libsais_prefetch(s0 > 0 ? Ts0 : NULL); Ts0--; libsais_prefetch(s0 > 0 ? Ts0 : NULL); sa_sint_t s1 = SA[i + prefetch_distance + 1]; const sa_sint_t * Ts1 = &T[s1] - 1; libsais_prefetch(s1 > 0 ? Ts1 : NULL); Ts1--; libsais_prefetch(s1 > 0 ? 
Ts1 : NULL); libsais_prefetchw(&cache[i + prefetch_distance]); sa_sint_t symbol0 = SAINT_MIN, p0 = SA[i + 0]; SA[i + 0] = p0 & SAINT_MAX; if (p0 > 0) { p0--; cache[i + 0].index = p0 | ((sa_sint_t)(T[p0 - (p0 > 0)] > T[p0]) << (SAINT_BIT - 1)); symbol0 = T[p0]; } cache[i + 0].symbol = symbol0; sa_sint_t symbol1 = SAINT_MIN, p1 = SA[i + 1]; SA[i + 1] = p1 & SAINT_MAX; if (p1 > 0) { p1--; cache[i + 1].index = p1 | ((sa_sint_t)(T[p1 - (p1 > 0)] > T[p1]) << (SAINT_BIT - 1)); symbol1 = T[p1]; } cache[i + 1].symbol = symbol1; } for (j += prefetch_distance + 1; i < j; i += 1) { sa_sint_t symbol = SAINT_MIN, p = SA[i]; SA[i] = p & SAINT_MAX; if (p > 0) { p--; cache[i].index = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); symbol = T[p]; } cache[i].symbol = symbol; } } static void libsais_final_sorting_scan_right_to_left_32s_block_sort(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT induction_bucket, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = omp_block_start + omp_block_size - 1, j = omp_block_start + prefetch_distance + 1; i >= j; i -= 2) { libsais_prefetchw(&cache[i - 2 * prefetch_distance]); sa_sint_t s0 = cache[i - prefetch_distance - 0].symbol; const sa_sint_t * Is0 = &induction_bucket[s0]; libsais_prefetchw(s0 >= 0 ? Is0 : NULL); sa_sint_t s1 = cache[i - prefetch_distance - 1].symbol; const sa_sint_t * Is1 = &induction_bucket[s1]; libsais_prefetchw(s1 >= 0 ? Is1 : NULL); sa_sint_t v0 = cache[i - 0].symbol; if (v0 >= 0) { cache[i - 0].symbol = --induction_bucket[v0]; if (cache[i - 0].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 0].symbol, np = cache[i - 0].index; cache[i - 0].index = np & SAINT_MAX; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] > T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } sa_sint_t v1 = cache[i - 1].symbol; if (v1 >= 0) { cache[i - 1].symbol = --induction_bucket[v1]; if (cache[i - 1].symbol >= omp_block_start) { sa_sint_t ni = cache[i - 1].symbol, np = cache[i - 1].index; cache[i - 1].index = np & SAINT_MAX; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] > T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } } for (j -= prefetch_distance + 1; i >= j; i -= 1) { sa_sint_t v = cache[i].symbol; if (v >= 0) { cache[i].symbol = --induction_bucket[v]; if (cache[i].symbol >= omp_block_start) { sa_sint_t ni = cache[i].symbol, np = cache[i].index; cache[i].index = np & SAINT_MAX; if (np > 0) { np--; cache[ni].index = np | ((sa_sint_t)(T[np - (np > 0)] > T[np]) << (SAINT_BIT - 1)); cache[ni].symbol = T[np]; } } } } } static void libsais_final_bwt_scan_right_to_left_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_final_bwt_scan_right_to_left_8u(T, SA, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais_final_bwt_scan_right_to_left_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { fast_sint_t t; for (t = omp_num_threads - 1; t >= 0; --t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A - B; temp_bucket[c] = A; } } } #pragma omp barrier { libsais_final_order_scan_right_to_left_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } } #endif } } static void libsais_final_bwt_aux_scan_right_to_left_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_final_bwt_aux_scan_right_to_left_8u(T, SA, rm, I, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais_final_bwt_aux_scan_right_to_left_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { fast_sint_t t; for (t = omp_num_threads - 1; t >= 0; --t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A - B; temp_bucket[c] = A; } } } #pragma omp barrier { libsais_final_bwt_aux_scan_right_to_left_8u_block_place(SA, rm, I, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } } #endif } } static void libsais_final_sorting_scan_right_to_left_8u_block_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT induction_bucket, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 64 * ALPHABET_SIZE && omp_get_dynamic() == 0) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_final_sorting_scan_right_to_left_8u(T, SA, induction_bucket, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais_final_sorting_scan_right_to_left_8u_block_prepare(T, SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { fast_sint_t t; for (t = omp_num_threads - 1; t >= 0; --t) { sa_sint_t * RESTRICT temp_bucket = thread_state[t].state.buckets; fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_sint_t A = induction_bucket[c], B = temp_bucket[c]; induction_bucket[c] = A - B; temp_bucket[c] = A; } } } #pragma omp barrier { libsais_final_order_scan_right_to_left_8u_block_place(SA, thread_state[omp_thread_num].state.buckets, thread_state[omp_thread_num].state.cache, thread_state[omp_thread_num].state.count); } } #endif } } static void libsais_final_sorting_scan_right_to_left_32s_block_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t * RESTRICT buckets, LIBSAIS_THREAD_CACHE * RESTRICT cache, fast_sint_t block_start, fast_sint_t block_size, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && block_size >= 16384) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(cache); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (block_size / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
omp_block_stride : block_size - omp_block_start; omp_block_start += block_start; if (omp_num_threads == 1) { libsais_final_sorting_scan_right_to_left_32s(T, SA, buckets, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { libsais_final_sorting_scan_right_to_left_32s_block_gather(T, SA, cache - block_start, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { libsais_final_sorting_scan_right_to_left_32s_block_sort(T, buckets, cache - block_start, block_start, block_size); } #pragma omp barrier { libsais_compact_and_place_cached_suffixes(SA, cache - block_start, omp_block_start, omp_block_size); } } #endif } } #endif static sa_sint_t libsais_final_bwt_scan_right_to_left_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t index = -1; if (threads == 1 || n < 65536) { index = libsais_final_bwt_scan_right_to_left_8u(T, SA, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = (fast_sint_t)n - 1; block_start >= 0; ) { if (SA[block_start] == 0) { index = (sa_sint_t)block_start--; } else { fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end < 0) { block_max_end = -1; } fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; } fast_sint_t block_size = block_start - block_end; if (block_size < 32) { for (; block_start > block_end; block_start -= 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[block_start] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? p : t; } } } else { libsais_final_bwt_scan_right_to_left_8u_block_omp(T, SA, induction_bucket, block_end + 1, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif return index; } static void libsais_final_bwt_aux_scan_right_to_left_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t rm, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || n < 65536) { libsais_final_bwt_aux_scan_right_to_left_8u(T, SA, rm, I, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = (fast_sint_t)n - 1; block_start >= 0; ) { if (SA[block_start] == 0) { block_start--; } else { fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * ((LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads) / 2); if (block_max_end < 0) { block_max_end = -1; } fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; } fast_sint_t block_size = block_start - block_end; if (block_size < 32) { for (; block_start > block_end; block_start -= 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; uint8_t c0 = T[p - (p > 0)], c1 = T[p]; SA[block_start] = c1; sa_sint_t t = c0 | SAINT_MIN; SA[--induction_bucket[c1]] = (c0 <= c1) ? 
p : t; if ((p & rm) == 0) { I[p / (rm + 1)] = induction_bucket[T[p]] + 1; } } } } else { libsais_final_bwt_aux_scan_right_to_left_8u_block_omp(T, SA, rm, I, induction_bucket, block_end + 1, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif } static void libsais_final_sorting_scan_right_to_left_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || n < 65536) { libsais_final_sorting_scan_right_to_left_8u(T, SA, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start; for (block_start = (fast_sint_t)n - 1; block_start >= 0; ) { if (SA[block_start] == 0) { block_start--; } else { fast_sint_t block_max_end = block_start - ((fast_sint_t)threads) * (LIBSAIS_PER_THREAD_CACHE_SIZE - 16 * (fast_sint_t)threads); if (block_max_end < -1) { block_max_end = -1; } fast_sint_t block_end = block_start - 1; while (block_end > block_max_end && SA[block_end] != 0) { block_end--; } fast_sint_t block_size = block_start - block_end; if (block_size < 32) { for (; block_start > block_end; block_start -= 1) { sa_sint_t p = SA[block_start]; SA[block_start] = p & SAINT_MAX; if (p > 0) { p--; SA[--induction_bucket[T[p]]] = p | ((sa_sint_t)(T[p - (p > 0)] > T[p]) << (SAINT_BIT - 1)); } } } else { libsais_final_sorting_scan_right_to_left_8u_block_omp(T, SA, induction_bucket, block_end + 1, block_size, threads, thread_state); block_start = block_end; } } } } #else UNUSED(thread_state); #endif } static void libsais_final_sorting_scan_right_to_left_32s_omp(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t * RESTRICT induction_bucket, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (threads == 1 || n < 65536) { libsais_final_sorting_scan_right_to_left_32s(T, SA, induction_bucket, 0, n); } #if defined(_OPENMP) else { fast_sint_t block_start, block_end; for (block_start = (fast_sint_t)n - 1; block_start >= 0; block_start = block_end) { block_end = block_start - (fast_sint_t)threads * LIBSAIS_PER_THREAD_CACHE_SIZE; if (block_end < 0) { block_end = -1; } libsais_final_sorting_scan_right_to_left_32s_block_omp(T, SA, induction_bucket, thread_state[0].state.cache, block_end + 1, block_start - block_end, threads); } } #else UNUSED(thread_state); #endif } static void libsais_clear_lms_suffixes_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT bucket_start, sa_sint_t * RESTRICT bucket_end, sa_sint_t threads) { fast_sint_t c; #if defined(_OPENMP) #pragma omp parallel for schedule(static, 1) num_threads(threads) if(threads > 1 && n >= 65536) #else UNUSED(threads); UNUSED(n); #endif for (c = 0; c < k; ++c) { if (bucket_end[c] > bucket_start[c]) { memset(&SA[bucket_start[c]], 0, ((size_t)bucket_end[c] - (size_t)bucket_start[c]) * sizeof(sa_sint_t)); } } } static sa_sint_t libsais_induce_final_order_8u_omp(const uint8_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t bwt, sa_sint_t r, sa_sint_t * RESTRICT I, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (!bwt) { libsais_final_sorting_scan_left_to_right_8u_omp(T, SA, n, &buckets[6 * ALPHABET_SIZE], threads, thread_state); if (threads > 1 && n >= 65536) { libsais_clear_lms_suffixes_omp(SA, n, ALPHABET_SIZE, &buckets[6 * ALPHABET_SIZE], &buckets[7 * ALPHABET_SIZE], threads); } libsais_final_sorting_scan_right_to_left_8u_omp(T, SA, n, 
&buckets[7 * ALPHABET_SIZE], threads, thread_state); return 0; } else if (I != NULL) { libsais_final_bwt_aux_scan_left_to_right_8u_omp(T, SA, n, r - 1, I, &buckets[6 * ALPHABET_SIZE], threads, thread_state); if (threads > 1 && n >= 65536) { libsais_clear_lms_suffixes_omp(SA, n, ALPHABET_SIZE, &buckets[6 * ALPHABET_SIZE], &buckets[7 * ALPHABET_SIZE], threads); } libsais_final_bwt_aux_scan_right_to_left_8u_omp(T, SA, n, r - 1, I, &buckets[7 * ALPHABET_SIZE], threads, thread_state); return 0; } else { libsais_final_bwt_scan_left_to_right_8u_omp(T, SA, n, &buckets[6 * ALPHABET_SIZE], threads, thread_state); if (threads > 1 && n >= 65536) { libsais_clear_lms_suffixes_omp(SA, n, ALPHABET_SIZE, &buckets[6 * ALPHABET_SIZE], &buckets[7 * ALPHABET_SIZE], threads); } return libsais_final_bwt_scan_right_to_left_8u_omp(T, SA, n, &buckets[7 * ALPHABET_SIZE], threads, thread_state); } } static void libsais_induce_final_order_32s_6k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais_final_sorting_scan_left_to_right_32s_omp(T, SA, n, &buckets[4 * k], threads, thread_state); libsais_final_sorting_scan_right_to_left_32s_omp(T, SA, n, &buckets[5 * k], threads, thread_state); } static void libsais_induce_final_order_32s_4k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais_final_sorting_scan_left_to_right_32s_omp(T, SA, n, &buckets[2 * k], threads, thread_state); libsais_final_sorting_scan_right_to_left_32s_omp(T, SA, n, &buckets[3 * k], threads, thread_state); } static void libsais_induce_final_order_32s_2k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais_final_sorting_scan_left_to_right_32s_omp(T, SA, n, &buckets[1 * k], threads, thread_state); libsais_final_sorting_scan_right_to_left_32s_omp(T, SA, n, &buckets[0 * k], threads, thread_state); } static void libsais_induce_final_order_32s_1k(const sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais_count_suffixes_32s(T, n, k, buckets); libsais_initialize_buckets_start_32s_1k(k, buckets); libsais_final_sorting_scan_left_to_right_32s_omp(T, SA, n, buckets, threads, thread_state); libsais_count_suffixes_32s(T, n, k, buckets); libsais_initialize_buckets_end_32s_1k(k, buckets); libsais_final_sorting_scan_right_to_left_32s_omp(T, SA, n, buckets, threads, thread_state); } static sa_sint_t libsais_renumber_unique_and_nonunique_lms_suffixes_32s(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t f, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT SAm = &SA[m]; sa_sint_t i, j; for (i = (sa_sint_t)omp_block_start, j = (sa_sint_t)omp_block_start + (sa_sint_t)omp_block_size - 2 * (sa_sint_t)prefetch_distance - 3; i < j; i += 4) { libsais_prefetch(&SA[i + 3 * prefetch_distance]); libsais_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 0]) >> 1]); libsais_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 1]) >> 1]); libsais_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * prefetch_distance + 2]) >> 1]); libsais_prefetchw(&SAm[((sa_uint_t)SA[i + 2 * 
prefetch_distance + 3]) >> 1]); sa_uint_t q0 = (sa_uint_t)SA[i + prefetch_distance + 0]; const sa_sint_t * Tq0 = &T[q0]; libsais_prefetchw(SAm[q0 >> 1] < 0 ? Tq0 : NULL); sa_uint_t q1 = (sa_uint_t)SA[i + prefetch_distance + 1]; const sa_sint_t * Tq1 = &T[q1]; libsais_prefetchw(SAm[q1 >> 1] < 0 ? Tq1 : NULL); sa_uint_t q2 = (sa_uint_t)SA[i + prefetch_distance + 2]; const sa_sint_t * Tq2 = &T[q2]; libsais_prefetchw(SAm[q2 >> 1] < 0 ? Tq2 : NULL); sa_uint_t q3 = (sa_uint_t)SA[i + prefetch_distance + 3]; const sa_sint_t * Tq3 = &T[q3]; libsais_prefetchw(SAm[q3 >> 1] < 0 ? Tq3 : NULL); sa_uint_t p0 = (sa_uint_t)SA[i + 0]; sa_sint_t s0 = SAm[p0 >> 1]; if (s0 < 0) { T[p0] |= SAINT_MIN; f++; s0 = i + 0 + SAINT_MIN + f; } SAm[p0 >> 1] = s0 - f; sa_uint_t p1 = (sa_uint_t)SA[i + 1]; sa_sint_t s1 = SAm[p1 >> 1]; if (s1 < 0) { T[p1] |= SAINT_MIN; f++; s1 = i + 1 + SAINT_MIN + f; } SAm[p1 >> 1] = s1 - f; sa_uint_t p2 = (sa_uint_t)SA[i + 2]; sa_sint_t s2 = SAm[p2 >> 1]; if (s2 < 0) { T[p2] |= SAINT_MIN; f++; s2 = i + 2 + SAINT_MIN + f; } SAm[p2 >> 1] = s2 - f; sa_uint_t p3 = (sa_uint_t)SA[i + 3]; sa_sint_t s3 = SAm[p3 >> 1]; if (s3 < 0) { T[p3] |= SAINT_MIN; f++; s3 = i + 3 + SAINT_MIN + f; } SAm[p3 >> 1] = s3 - f; } for (j += 2 * (sa_sint_t)prefetch_distance + 3; i < j; i += 1) { sa_uint_t p = (sa_uint_t)SA[i]; sa_sint_t s = SAm[p >> 1]; if (s < 0) { T[p] |= SAINT_MIN; f++; s = i + SAINT_MIN + f; } SAm[p >> 1] = s - f; } return f; } static void libsais_compact_unique_and_nonunique_lms_suffixes_32s(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t * pl, fast_sint_t * pr, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT SAl = &SA[0]; sa_sint_t * RESTRICT SAr = &SA[0]; fast_sint_t i, j, l = *pl - 1, r = *pr - 1; for (i = (fast_sint_t)m + omp_block_start + omp_block_size - 1, j = (fast_sint_t)m + omp_block_start + 3; i >= j; i -= 4) { libsais_prefetch(&SA[i - prefetch_distance]); sa_sint_t p0 = SA[i - 0]; SAl[l] = p0 & SAINT_MAX; l -= p0 < 0; SAr[r] = p0 - 1; r -= p0 > 0; sa_sint_t p1 = SA[i - 1]; SAl[l] = p1 & SAINT_MAX; l -= p1 < 0; SAr[r] = p1 - 1; r -= p1 > 0; sa_sint_t p2 = SA[i - 2]; SAl[l] = p2 & SAINT_MAX; l -= p2 < 0; SAr[r] = p2 - 1; r -= p2 > 0; sa_sint_t p3 = SA[i - 3]; SAl[l] = p3 & SAINT_MAX; l -= p3 < 0; SAr[r] = p3 - 1; r -= p3 > 0; } for (j -= 3; i >= j; i -= 1) { sa_sint_t p = SA[i]; SAl[l] = p & SAINT_MAX; l -= p < 0; SAr[r] = p - 1; r -= p > 0; } *pl = l + 1; *pr = r + 1; } #if defined(_OPENMP) static sa_sint_t libsais_count_unique_suffixes(sa_sint_t * RESTRICT SA, sa_sint_t m, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; sa_sint_t * RESTRICT SAm = &SA[m]; fast_sint_t i, j; sa_sint_t f0 = 0, f1 = 0, f2 = 0, f3 = 0; for (i = omp_block_start, j = omp_block_start + omp_block_size - prefetch_distance - 3; i < j; i += 4) { libsais_prefetch(&SA[i + 2 * prefetch_distance]); libsais_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 0]) >> 1]); libsais_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 1]) >> 1]); libsais_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 2]) >> 1]); libsais_prefetch(&SAm[((sa_uint_t)SA[i + prefetch_distance + 3]) >> 1]); f0 += SAm[((sa_uint_t)SA[i + 0]) >> 1] < 0; f1 += SAm[((sa_uint_t)SA[i + 1]) >> 1] < 0; f2 += SAm[((sa_uint_t)SA[i + 2]) >> 1] < 0; f3 += SAm[((sa_uint_t)SA[i + 3]) >> 1] < 0; } for (j += prefetch_distance + 3; i < j; i += 1) { f0 += SAm[((sa_uint_t)SA[i]) >> 1] < 0; } return f0 + f1 + f2 + f3; } #endif static 
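/* OpenMP driver for the renumbering step: each thread first counts the unique LMS suffixes in its block, then renumbers its block using the running total of the preceding blocks as the offset */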
sa_sint_t libsais_renumber_unique_and_nonunique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t f = 0; #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start; if (omp_num_threads == 1) { f = libsais_renumber_unique_and_nonunique_lms_suffixes_32s(T, SA, m, 0, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais_count_unique_suffixes(SA, m, omp_block_start, omp_block_size); } #pragma omp barrier { fast_sint_t t, count = 0; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; } if (omp_thread_num == omp_num_threads - 1) { f = (sa_sint_t)(count + thread_state[omp_thread_num].state.count); } libsais_renumber_unique_and_nonunique_lms_suffixes_32s(T, SA, m, (sa_sint_t)count, omp_block_start, omp_block_size); } } #endif } return f; } static void libsais_compact_unique_and_nonunique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 131072 && m < fs) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (((fast_sint_t)n >> 1) / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : ((fast_sint_t)n >> 1) - omp_block_start; if (omp_num_threads == 1) { fast_sint_t l = m, r = (fast_sint_t)n + (fast_sint_t)fs; libsais_compact_unique_and_nonunique_lms_suffixes_32s(SA, m, &l, &r, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.position = (fast_sint_t)m + ((fast_sint_t)n >> 1) + omp_block_start + omp_block_size; thread_state[omp_thread_num].state.count = (fast_sint_t)m + omp_block_start + omp_block_size; libsais_compact_unique_and_nonunique_lms_suffixes_32s(SA, m, &thread_state[omp_thread_num].state.position, &thread_state[omp_thread_num].state.count, omp_block_start, omp_block_size); } #pragma omp barrier #pragma omp master { fast_sint_t t, position; for (position = m, t = omp_num_threads - 1; t >= 0; --t) { fast_sint_t omp_block_end = t < omp_num_threads - 1 ? omp_block_stride * (t + 1) : ((fast_sint_t)n >> 1); fast_sint_t count = ((fast_sint_t)m + ((fast_sint_t)n >> 1) + omp_block_end - thread_state[t].state.position); if (count > 0) { position -= count; memcpy(&SA[position], &SA[thread_state[t].state.position], (size_t)count * sizeof(sa_sint_t)); } } for (position = (fast_sint_t)n + (fast_sint_t)fs, t = omp_num_threads - 1; t >= 0; --t) { fast_sint_t omp_block_end = t < omp_num_threads - 1 ? 
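/* block end: a stride boundary for all but the last block, which ends at the n/2 midpoint */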
omp_block_stride * (t + 1) : ((fast_sint_t)n >> 1); fast_sint_t count = ((fast_sint_t)m + omp_block_end - thread_state[t].state.count); if (count > 0) { position -= count; memcpy(&SA[position], &SA[thread_state[t].state.count], (size_t)count * sizeof(sa_sint_t)); } } } } #endif } memcpy(&SA[(fast_sint_t)n + (fast_sint_t)fs - (fast_sint_t)m], &SA[(fast_sint_t)m - (fast_sint_t)f], (size_t)f * sizeof(sa_sint_t)); } static sa_sint_t libsais_compact_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t f = libsais_renumber_unique_and_nonunique_lms_suffixes_32s_omp(T, SA, m, threads, thread_state); libsais_compact_unique_and_nonunique_lms_suffixes_32s_omp(SA, n, m, fs, f, threads, thread_state); return f; } static void libsais_merge_unique_lms_suffixes_32s(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, fast_sint_t l, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; const sa_sint_t * RESTRICT SAnm = &SA[(fast_sint_t)n - (fast_sint_t)m - 1 + l]; sa_sint_t i, j; fast_sint_t tmp = *SAnm++; for (i = (sa_sint_t)omp_block_start, j = (sa_sint_t)omp_block_start + (sa_sint_t)omp_block_size - 6; i < j; i += 4) { libsais_prefetch(&T[i + prefetch_distance]); sa_sint_t c0 = T[i + 0]; if (c0 < 0) { T[i + 0] = c0 & SAINT_MAX; SA[tmp] = i + 0; i++; tmp = *SAnm++; } sa_sint_t c1 = T[i + 1]; if (c1 < 0) { T[i + 1] = c1 & SAINT_MAX; SA[tmp] = i + 1; i++; tmp = *SAnm++; } sa_sint_t c2 = T[i + 2]; if (c2 < 0) { T[i + 2] = c2 & SAINT_MAX; SA[tmp] = i + 2; i++; tmp = *SAnm++; } sa_sint_t c3 = T[i + 3]; if (c3 < 0) { T[i + 3] = c3 & SAINT_MAX; SA[tmp] = i + 3; i++; tmp = *SAnm++; } } for (j += 6; i < j; i += 1) { sa_sint_t c = T[i]; if (c < 0) { T[i] = c & SAINT_MAX; SA[tmp] = i; i++; tmp = *SAnm++; } } } static void libsais_merge_nonunique_lms_suffixes_32s(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, fast_sint_t l, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { const fast_sint_t prefetch_distance = 32; const sa_sint_t * RESTRICT SAnm = &SA[(fast_sint_t)n - (fast_sint_t)m - 1 + l]; fast_sint_t i, j; sa_sint_t tmp = *SAnm++; for (i = omp_block_start, j = omp_block_start + omp_block_size - 3; i < j; i += 4) { libsais_prefetch(&SA[i + prefetch_distance]); if (SA[i + 0] == 0) { SA[i + 0] = tmp; tmp = *SAnm++; } if (SA[i + 1] == 0) { SA[i + 1] = tmp; tmp = *SAnm++; } if (SA[i + 2] == 0) { SA[i + 2] = tmp; tmp = *SAnm++; } if (SA[i + 3] == 0) { SA[i + 3] = tmp; tmp = *SAnm++; } } for (j += 3; i < j; i += 1) { if (SA[i] == 0) { SA[i] = tmp; tmp = *SAnm++; } } } static void libsais_merge_unique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
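/* all but the last thread get a full stride; the last one also covers the remainder of n */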
omp_block_stride : n - omp_block_start; if (omp_num_threads == 1) { libsais_merge_unique_lms_suffixes_32s(T, SA, n, m, 0, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais_count_negative_marked_suffixes(T, omp_block_start, omp_block_size); } #pragma omp barrier { fast_sint_t t, count = 0; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; } libsais_merge_unique_lms_suffixes_32s(T, SA, n, m, count, omp_block_start, omp_block_size); } } #endif } } static void libsais_merge_nonunique_lms_suffixes_32s_omp(sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && m >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); UNUSED(thread_state); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = (m / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : m - omp_block_start; if (omp_num_threads == 1) { libsais_merge_nonunique_lms_suffixes_32s(SA, n, m, f, omp_block_start, omp_block_size); } #if defined(_OPENMP) else { { thread_state[omp_thread_num].state.count = libsais_count_zero_marked_suffixes(SA, omp_block_start, omp_block_size); } #pragma omp barrier { fast_sint_t t, count = f; for (t = 0; t < omp_thread_num; ++t) { count += thread_state[t].state.count; } libsais_merge_nonunique_lms_suffixes_32s(SA, n, m, count, omp_block_start, omp_block_size); } } #endif } } static void libsais_merge_compacted_lms_suffixes_32s_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { libsais_merge_unique_lms_suffixes_32s_omp(T, SA, n, m, threads, thread_state); libsais_merge_nonunique_lms_suffixes_32s_omp(SA, n, m, f, threads, thread_state); } static void libsais_reconstruct_compacted_lms_suffixes_32s_2k_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t m, sa_sint_t fs, sa_sint_t f, sa_sint_t * RESTRICT buckets, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (f > 0) { memmove(&SA[n - m - 1], &SA[n + fs - m], (size_t)f * sizeof(sa_sint_t)); libsais_count_and_gather_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state); libsais_reconstruct_lms_suffixes_omp(SA, n, m - f, threads); memcpy(&SA[n - m - 1 + f], &SA[0], ((size_t)m - (size_t)f) * sizeof(sa_sint_t)); memset(&SA[0], 0, (size_t)m * sizeof(sa_sint_t)); libsais_merge_compacted_lms_suffixes_32s_omp(T, SA, n, m, f, threads, thread_state); } else { libsais_count_and_gather_lms_suffixes_32s_2k(T, SA, n, k, buckets, 0, n); libsais_reconstruct_lms_suffixes_omp(SA, n, m, threads); } } static void libsais_reconstruct_compacted_lms_suffixes_32s_1k_omp(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t m, sa_sint_t fs, sa_sint_t f, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (f > 0) { memmove(&SA[n - m - 1], &SA[n + fs - m], (size_t)f * sizeof(sa_sint_t)); libsais_gather_compacted_lms_suffixes_32s(T, SA, n); libsais_reconstruct_lms_suffixes_omp(SA, n, m - f, threads); memcpy(&SA[n - m - 1 + f], &SA[0], 
((size_t)m - (size_t)f) * sizeof(sa_sint_t)); memset(&SA[0], 0, (size_t)m * sizeof(sa_sint_t)); libsais_merge_compacted_lms_suffixes_32s_omp(T, SA, n, m, f, threads, thread_state); } else { libsais_gather_lms_suffixes_32s(T, SA, n); libsais_reconstruct_lms_suffixes_omp(SA, n, m, threads); } } static sa_sint_t libsais_main_32s(sa_sint_t * RESTRICT T, sa_sint_t * RESTRICT SA, sa_sint_t n, sa_sint_t k, sa_sint_t fs, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { if (k > 0 && fs / k >= 6) { sa_sint_t alignment = (fs - 1024) / k >= 6 ? 1024 : 16; sa_sint_t * RESTRICT buckets = (fs - alignment) / k >= 6 ? (sa_sint_t *)libsais_align_up(&SA[n + fs - 6 * k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : &SA[n + fs - 6 * k]; sa_sint_t m = libsais_count_and_gather_lms_suffixes_32s_4k_omp(T, SA, n, k, buckets, threads, thread_state); if (m > 1) { memset(SA, 0, ((size_t)n - (size_t)m) * sizeof(sa_sint_t)); sa_sint_t first_lms_suffix = SA[n - m]; sa_sint_t left_suffixes_count = libsais_initialize_buckets_for_lms_suffixes_radix_sort_32s_6k(T, k, buckets, first_lms_suffix); libsais_radix_sort_lms_suffixes_32s_6k_omp(T, SA, n, m, &buckets[4 * k], threads, thread_state); libsais_radix_sort_set_markers_32s_6k_omp(SA, k, &buckets[4 * k], threads); if (threads > 1 && n >= 65536) { memset(&SA[(fast_sint_t)n - (fast_sint_t)m], 0, (size_t)m * sizeof(sa_sint_t)); } libsais_initialize_buckets_for_partial_sorting_32s_6k(T, k, buckets, first_lms_suffix, left_suffixes_count); libsais_induce_partial_order_32s_6k_omp(T, SA, n, k, buckets, first_lms_suffix, left_suffixes_count, threads, thread_state); sa_sint_t names = libsais_renumber_and_mark_distinct_lms_suffixes_32s_4k_omp(SA, n, m, threads, thread_state); if (names < m) { sa_sint_t f = libsais_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state); if (libsais_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0) { return -2; } libsais_reconstruct_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, m, fs, f, buckets, threads, thread_state); } else { libsais_count_lms_suffixes_32s_2k(T, n, k, buckets); } libsais_initialize_buckets_start_and_end_32s_4k(k, buckets); libsais_place_lms_suffixes_histogram_32s_4k(SA, n, k, m, buckets); libsais_induce_final_order_32s_4k(T, SA, n, k, buckets, threads, thread_state); } else { SA[0] = SA[n - 1]; libsais_initialize_buckets_start_and_end_32s_6k(k, buckets); libsais_place_lms_suffixes_histogram_32s_6k(SA, n, k, m, buckets); libsais_induce_final_order_32s_6k(T, SA, n, k, buckets, threads, thread_state); } return 0; } else if (k > 0 && fs / k >= 4) { sa_sint_t alignment = (fs - 1024) / k >= 4 ? 1024 : 16; sa_sint_t * RESTRICT buckets = (fs - alignment) / k >= 4 ? 
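/* carve the 4*k bucket table out of the tail of the free workspace, aligned when there is room, unaligned otherwise */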
(sa_sint_t *)libsais_align_up(&SA[n + fs - 4 * k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : &SA[n + fs - 4 * k]; sa_sint_t m = libsais_count_and_gather_lms_suffixes_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state); if (m > 1) { libsais_initialize_buckets_for_radix_and_partial_sorting_32s_4k(T, k, buckets, SA[n - m]); libsais_radix_sort_lms_suffixes_32s_2k_omp(T, SA, n, m, &buckets[1], threads, thread_state); libsais_radix_sort_set_markers_32s_4k_omp(SA, k, &buckets[1], threads); libsais_place_lms_suffixes_interval_32s_4k(SA, n, k, m - 1, buckets); libsais_induce_partial_order_32s_4k_omp(T, SA, n, k, buckets, threads, thread_state); sa_sint_t names = libsais_renumber_and_mark_distinct_lms_suffixes_32s_4k_omp(SA, n, m, threads, thread_state); if (names < m) { sa_sint_t f = libsais_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state); if (libsais_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0) { return -2; } libsais_reconstruct_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, m, fs, f, buckets, threads, thread_state); } else { libsais_count_lms_suffixes_32s_2k(T, n, k, buckets); } } else { SA[0] = SA[n - 1]; } libsais_initialize_buckets_start_and_end_32s_4k(k, buckets); libsais_place_lms_suffixes_histogram_32s_4k(SA, n, k, m, buckets); libsais_induce_final_order_32s_4k(T, SA, n, k, buckets, threads, thread_state); return 0; } else if (k > 0 && fs / k >= 2) { sa_sint_t alignment = (fs - 1024) / k >= 2 ? 1024 : 16; sa_sint_t * RESTRICT buckets = (fs - alignment) / k >= 2 ? (sa_sint_t *)libsais_align_up(&SA[n + fs - 2 * k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : &SA[n + fs - 2 * k]; sa_sint_t m = libsais_count_and_gather_lms_suffixes_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state); if (m > 1) { libsais_initialize_buckets_for_lms_suffixes_radix_sort_32s_2k(T, k, buckets, SA[n - m]); libsais_radix_sort_lms_suffixes_32s_2k_omp(T, SA, n, m, &buckets[1], threads, thread_state); libsais_place_lms_suffixes_interval_32s_2k(SA, n, k, m - 1, buckets); libsais_initialize_buckets_start_and_end_32s_2k(k, buckets); libsais_induce_partial_order_32s_2k_omp(T, SA, n, k, buckets, threads, thread_state); sa_sint_t names = libsais_renumber_and_mark_distinct_lms_suffixes_32s_1k_omp(T, SA, n, m, threads); if (names < m) { sa_sint_t f = libsais_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state); if (libsais_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0) { return -2; } libsais_reconstruct_compacted_lms_suffixes_32s_2k_omp(T, SA, n, k, m, fs, f, buckets, threads, thread_state); } else { libsais_count_lms_suffixes_32s_2k(T, n, k, buckets); } } else { SA[0] = SA[n - 1]; } libsais_initialize_buckets_end_32s_2k(k, buckets); libsais_place_lms_suffixes_histogram_32s_2k(SA, n, k, m, buckets); libsais_initialize_buckets_start_and_end_32s_2k(k, buckets); libsais_induce_final_order_32s_2k(T, SA, n, k, buckets, threads, thread_state); return 0; } else { sa_sint_t * buffer = fs < k ? (sa_sint_t *)libsais_alloc_aligned((size_t)k * sizeof(sa_sint_t), 4096) : (sa_sint_t *)NULL; sa_sint_t alignment = fs - 1024 >= k ? 1024 : 16; sa_sint_t * RESTRICT buckets = fs - alignment >= k ? (sa_sint_t *)libsais_align_up(&SA[n + fs - k - alignment], (size_t)alignment * sizeof(sa_sint_t)) : fs >= k ? 
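/* otherwise use the unaligned tail of SA if it still fits, or fall back to the heap-allocated buffer */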
&SA[n + fs - k] : buffer; if (buckets == NULL) { return -2; } memset(SA, 0, (size_t)n * sizeof(sa_sint_t)); libsais_count_suffixes_32s(T, n, k, buckets); libsais_initialize_buckets_end_32s_1k(k, buckets); sa_sint_t m = libsais_radix_sort_lms_suffixes_32s_1k(T, SA, n, buckets); if (m > 1) { libsais_induce_partial_order_32s_1k_omp(T, SA, n, k, buckets, threads, thread_state); sa_sint_t names = libsais_renumber_and_mark_distinct_lms_suffixes_32s_1k_omp(T, SA, n, m, threads); if (names < m) { if (buffer != NULL) { libsais_free_aligned(buffer); buckets = NULL; } sa_sint_t f = libsais_compact_lms_suffixes_32s_omp(T, SA, n, m, fs, threads, thread_state); if (libsais_main_32s(SA + n + fs - m + f, SA, m - f, names - f, fs + n - 2 * m + f, threads, thread_state) != 0) { return -2; } libsais_reconstruct_compacted_lms_suffixes_32s_1k_omp(T, SA, n, m, fs, f, threads, thread_state); if (buckets == NULL) { buckets = buffer = (sa_sint_t *)libsais_alloc_aligned((size_t)k * sizeof(sa_sint_t), 4096); } if (buckets == NULL) { return -2; } } libsais_count_suffixes_32s(T, n, k, buckets); libsais_initialize_buckets_end_32s_1k(k, buckets); libsais_place_lms_suffixes_interval_32s_1k(T, SA, k, m, buckets); } libsais_induce_final_order_32s_1k(T, SA, n, k, buckets, threads, thread_state); libsais_free_aligned(buffer); return 0; } } int32_t libsais_main_32s_internal(int32_t * T, int32_t * SA, int32_t n, int32_t k, int32_t fs, int32_t threads) { LIBSAIS_THREAD_STATE * RESTRICT thread_state = threads > 1 ? libsais_alloc_thread_state(threads) : NULL; sa_sint_t index = thread_state != NULL || threads == 1 ? libsais_main_32s(T, SA, n, k, fs, threads, thread_state) : -2; libsais_free_thread_state(thread_state); return index; } static sa_sint_t libsais_main_8u(const uint8_t * T, sa_sint_t * SA, sa_sint_t n, sa_sint_t * RESTRICT buckets, sa_sint_t bwt, sa_sint_t r, sa_sint_t * RESTRICT I, sa_sint_t fs, sa_sint_t * freq, sa_sint_t threads, LIBSAIS_THREAD_STATE * RESTRICT thread_state) { sa_sint_t m = libsais_count_and_gather_lms_suffixes_8u_omp(T, SA, n, buckets, threads, thread_state); libsais_initialize_buckets_start_and_end_8u(buckets, freq); if (m > 0) { sa_sint_t first_lms_suffix = SA[n - m]; sa_sint_t left_suffixes_count = libsais_initialize_buckets_for_lms_suffixes_radix_sort_8u(T, buckets, first_lms_suffix); if (threads > 1 && n >= 65536) { memset(SA, 0, ((size_t)n - (size_t)m) * sizeof(sa_sint_t)); } libsais_radix_sort_lms_suffixes_8u_omp(T, SA, n, m, buckets, threads, thread_state); if (threads > 1 && n >= 65536) { memset(&SA[(fast_sint_t)n - (fast_sint_t)m], 0, (size_t)m * sizeof(sa_sint_t)); } libsais_initialize_buckets_for_partial_sorting_8u(T, buckets, first_lms_suffix, left_suffixes_count); libsais_induce_partial_order_8u_omp(T, SA, n, buckets, first_lms_suffix, left_suffixes_count, threads, thread_state); sa_sint_t names = libsais_renumber_and_gather_lms_suffixes_8u_omp(SA, n, m, fs, threads, thread_state); if (names < m) { if (libsais_main_32s(SA + n + fs - m, SA, m, names, fs + n - 2 * m, threads, thread_state) != 0) { return -2; } libsais_gather_lms_suffixes_8u_omp(T, SA, n, threads, thread_state); libsais_reconstruct_lms_suffixes_omp(SA, n, m, threads); } libsais_place_lms_suffixes_interval_8u(SA, n, m, buckets); } else { memset(SA, 0, (size_t)n * sizeof(sa_sint_t)); } return libsais_induce_final_order_8u_omp(T, SA, n, bwt, r, I, buckets, threads, thread_state); } static sa_sint_t libsais_main(const uint8_t * T, sa_sint_t * SA, sa_sint_t n, sa_sint_t bwt, sa_sint_t r, sa_sint_t * I, sa_sint_t fs, sa_sint_t 
* freq, sa_sint_t threads) { LIBSAIS_THREAD_STATE * RESTRICT thread_state = threads > 1 ? libsais_alloc_thread_state(threads) : NULL; sa_sint_t * RESTRICT buckets = (sa_sint_t *)libsais_alloc_aligned(8 * ALPHABET_SIZE * sizeof(sa_sint_t), 4096); sa_sint_t index = buckets != NULL && (thread_state != NULL || threads == 1) ? libsais_main_8u(T, SA, n, buckets, bwt, r, I, fs, freq, threads, thread_state) : -2; libsais_free_aligned(buckets); libsais_free_thread_state(thread_state); return index; } static sa_sint_t libsais_main_ctx(const LIBSAIS_CONTEXT * ctx, const uint8_t * T, sa_sint_t * SA, sa_sint_t n, sa_sint_t bwt, sa_sint_t r, sa_sint_t * I, sa_sint_t fs, sa_sint_t * freq) { return ctx != NULL && (ctx->buckets != NULL && (ctx->thread_state != NULL || ctx->threads == 1)) ? libsais_main_8u(T, SA, n, ctx->buckets, bwt, r, I, fs, freq, (sa_sint_t)ctx->threads, ctx->thread_state) : -2; } static void libsais_bwt_copy_8u(uint8_t * RESTRICT U, sa_sint_t * RESTRICT A, sa_sint_t n) { const fast_sint_t prefetch_distance = 32; fast_sint_t i, j; for (i = 0, j = (fast_sint_t)n - 7; i < j; i += 8) { libsais_prefetch(&A[i + prefetch_distance]); U[i + 0] = (uint8_t)A[i + 0]; U[i + 1] = (uint8_t)A[i + 1]; U[i + 2] = (uint8_t)A[i + 2]; U[i + 3] = (uint8_t)A[i + 3]; U[i + 4] = (uint8_t)A[i + 4]; U[i + 5] = (uint8_t)A[i + 5]; U[i + 6] = (uint8_t)A[i + 6]; U[i + 7] = (uint8_t)A[i + 7]; } for (j += 7; i < j; i += 1) { U[i] = (uint8_t)A[i]; } } #if defined(_OPENMP) static void libsais_bwt_copy_8u_omp(uint8_t * RESTRICT U, sa_sint_t * RESTRICT A, sa_sint_t n, sa_sint_t threads) { #if defined(_OPENMP) #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); fast_sint_t omp_block_stride = ((fast_sint_t)n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? 
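/* the last thread copies whatever remains after the 16-element-aligned blocks */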
omp_block_stride : (fast_sint_t)n - omp_block_start; #else UNUSED(threads); fast_sint_t omp_block_start = 0; fast_sint_t omp_block_size = (fast_sint_t)n; #endif libsais_bwt_copy_8u(U + omp_block_start, A + omp_block_start, (sa_sint_t)omp_block_size); } } #endif void * libsais_create_ctx(void) { return (void *)libsais_create_ctx_main(1); } void libsais_free_ctx(void * ctx) { libsais_free_ctx_main((LIBSAIS_CONTEXT *)ctx); } int32_t libsais(const uint8_t * T, int32_t * SA, int32_t n, int32_t fs, int32_t * freq) { if ((T == NULL) || (SA == NULL) || (n < 0) || (fs < 0)) { return -1; } else if (n < 2) { if (n == 1) { SA[0] = 0; } return 0; } return libsais_main(T, SA, n, 0, 0, NULL, fs, freq, 1); } int32_t libsais_ctx(const void * ctx, const uint8_t * T, int32_t * SA, int32_t n, int32_t fs, int32_t * freq) { if ((ctx == NULL) || (T == NULL) || (SA == NULL) || (n < 0) || (fs < 0)) { return -1; } else if (n < 2) { if (n == 1) { SA[0] = 0; } return 0; } return libsais_main_ctx((const LIBSAIS_CONTEXT *)ctx, T, SA, n, 0, 0, NULL, fs, freq); } int32_t libsais_bwt(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0]; } return n; } sa_sint_t index = libsais_main(T, A, n, 1, 0, NULL, fs, freq, 1); if (index >= 0) { index++; U[0] = T[n - 1]; libsais_bwt_copy_8u(U + 1, A, index - 1); libsais_bwt_copy_8u(U + index, A + index, n - index); } return index; } int32_t libsais_bwt_aux(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq, int32_t r, int32_t * I) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0) || (r < 2) || ((r & (r - 1)) != 0) || (I == NULL)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0]; } I[0] = n; return 0; } if (libsais_main(T, A, n, 1, r, I, fs, freq, 1) != 0) { return -2; } U[0] = T[n - 1]; libsais_bwt_copy_8u(U + 1, A, I[0] - 1); libsais_bwt_copy_8u(U + I[0], A + I[0], n - I[0]); return 0; } int32_t libsais_bwt_ctx(const void * ctx, const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq) { if ((ctx == NULL) || (T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0]; } return n; } sa_sint_t index = libsais_main_ctx((const LIBSAIS_CONTEXT *)ctx, T, A, n, 1, 0, NULL, fs, freq); if (index >= 0) { index++; U[0] = T[n - 1]; #if defined(_OPENMP) libsais_bwt_copy_8u_omp(U + 1, A, index - 1, (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads); libsais_bwt_copy_8u_omp(U + index, A + index, n - index, (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads); #else libsais_bwt_copy_8u(U + 1, A, index - 1); libsais_bwt_copy_8u(U + index, A + index, n - index); #endif } return index; } int32_t libsais_bwt_aux_ctx(const void * ctx, const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq, int32_t r, int32_t * I) { if ((ctx == NULL) || (T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0) || (r < 2) || ((r & (r - 1)) != 0) || (I == NULL)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0]; } I[0] = n; return 0; } if (libsais_main_ctx((const LIBSAIS_CONTEXT *)ctx, T, A, n, 1, r, I, fs, freq) != 0) { return -2; } U[0] = T[n - 1]; #if defined(_OPENMP) libsais_bwt_copy_8u_omp(U + 1, A, I[0] - 1, (sa_sint_t)((const LIBSAIS_CONTEXT *)ctx)->threads); libsais_bwt_copy_8u_omp(U + I[0], A + I[0], n - I[0], (sa_sint_t)((const LIBSAIS_CONTEXT 
*)ctx)->threads); #else libsais_bwt_copy_8u(U + 1, A, I[0] - 1); libsais_bwt_copy_8u(U + I[0], A + I[0], n - I[0]); #endif return 0; } #if defined(_OPENMP) void * libsais_create_ctx_omp(int32_t threads) { if (threads < 0) { return NULL; } threads = threads > 0 ? threads : omp_get_max_threads(); return (void *)libsais_create_ctx_main(threads); } int32_t libsais_omp(const uint8_t * T, int32_t * SA, int32_t n, int32_t fs, int32_t * freq, int32_t threads) { if ((T == NULL) || (SA == NULL) || (n < 0) || (fs < 0) || (threads < 0)) { return -1; } else if (n < 2) { if (n == 1) { SA[0] = 0; } return 0; } threads = threads > 0 ? threads : omp_get_max_threads(); return libsais_main(T, SA, n, 0, 0, NULL, fs, freq, threads); } int32_t libsais_bwt_omp(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq, int32_t threads) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0) || (threads < 0)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0]; } return n; } threads = threads > 0 ? threads : omp_get_max_threads(); sa_sint_t index = libsais_main(T, A, n, 1, 0, NULL, fs, freq, threads); if (index >= 0) { index++; U[0] = T[n - 1]; libsais_bwt_copy_8u_omp(U + 1, A, index - 1, threads); libsais_bwt_copy_8u_omp(U + index, A + index, n - index, threads); } return index; } int32_t libsais_bwt_aux_omp(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, int32_t fs, int32_t * freq, int32_t r, int32_t * I, int32_t threads) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || (fs < 0) || (r < 2) || ((r & (r - 1)) != 0) || (I == NULL) || (threads < 0)) { return -1; } else if (n <= 1) { if (n == 1) { U[0] = T[0];} I[0] = n; return 0; } threads = threads > 0 ? threads : omp_get_max_threads(); if (libsais_main(T, A, n, 1, r, I, fs, freq, threads) != 0) { return -2; } U[0] = T[n - 1]; libsais_bwt_copy_8u_omp(U + 1, A, I[0] - 1, threads); libsais_bwt_copy_8u_omp(U + I[0], A + I[0], n - I[0], threads); return 0; } #endif static LIBSAIS_UNBWT_CONTEXT * libsais_unbwt_create_ctx_main(sa_sint_t threads) { LIBSAIS_UNBWT_CONTEXT * RESTRICT ctx = (LIBSAIS_UNBWT_CONTEXT *)libsais_alloc_aligned(sizeof(LIBSAIS_UNBWT_CONTEXT), 64); sa_uint_t * RESTRICT bucket2 = (sa_uint_t *)libsais_alloc_aligned(ALPHABET_SIZE * ALPHABET_SIZE * sizeof(sa_uint_t), 4096); uint16_t * RESTRICT fastbits = (uint16_t *)libsais_alloc_aligned((1 + (1 << UNBWT_FASTBITS)) * sizeof(uint16_t), 4096); sa_uint_t * RESTRICT buckets = threads > 1 ? 
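/* per-thread scratch (one character histogram plus one full bigram table each) is only needed on the multi-threaded path */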
(sa_uint_t *)libsais_alloc_aligned((size_t)threads * (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)) * sizeof(sa_uint_t), 4096) : NULL; if (ctx != NULL && bucket2 != NULL && fastbits != NULL && (buckets != NULL || threads == 1)) { ctx->bucket2 = bucket2; ctx->fastbits = fastbits; ctx->buckets = buckets; ctx->threads = threads; return ctx; } libsais_free_aligned(buckets); libsais_free_aligned(fastbits); libsais_free_aligned(bucket2); libsais_free_aligned(ctx); return NULL; } static void libsais_unbwt_free_ctx_main(LIBSAIS_UNBWT_CONTEXT * ctx) { if (ctx != NULL) { libsais_free_aligned(ctx->buckets); libsais_free_aligned(ctx->fastbits); libsais_free_aligned(ctx->bucket2); libsais_free_aligned(ctx); } } static void libsais_unbwt_compute_histogram(const uint8_t * RESTRICT T, fast_sint_t n, sa_uint_t * RESTRICT count) { const fast_sint_t prefetch_distance = 256; const uint8_t * RESTRICT T_p = T; if (n >= 1024) { sa_uint_t copy[4 * (ALPHABET_SIZE + 16)]; memset(copy, 0, 4 * (ALPHABET_SIZE + 16) * sizeof(sa_uint_t)); sa_uint_t * RESTRICT copy0 = copy + 0 * (ALPHABET_SIZE + 16); sa_uint_t * RESTRICT copy1 = copy + 1 * (ALPHABET_SIZE + 16); sa_uint_t * RESTRICT copy2 = copy + 2 * (ALPHABET_SIZE + 16); sa_uint_t * RESTRICT copy3 = copy + 3 * (ALPHABET_SIZE + 16); for (; T_p < (uint8_t * )((ptrdiff_t)(T + 63) & (-64)); T_p += 1) { copy0[T_p[0]]++; } fast_uint_t x = ((const uint32_t *)(const void *)T_p)[0], y = ((const uint32_t *)(const void *)T_p)[1]; for (; T_p < (uint8_t * )((ptrdiff_t)(T + n - 8) & (-64)); T_p += 64) { libsais_prefetch(&T_p[prefetch_distance]); fast_uint_t z = ((const uint32_t *)(const void *)T_p)[2], w = ((const uint32_t *)(const void *)T_p)[3]; copy0[(uint8_t)x]++; x >>= 8; copy1[(uint8_t)x]++; x >>= 8; copy2[(uint8_t)x]++; x >>= 8; copy3[x]++; copy0[(uint8_t)y]++; y >>= 8; copy1[(uint8_t)y]++; y >>= 8; copy2[(uint8_t)y]++; y >>= 8; copy3[y]++; x = ((const uint32_t *)(const void *)T_p)[4]; y = ((const uint32_t *)(const void *)T_p)[5]; copy0[(uint8_t)z]++; z >>= 8; copy1[(uint8_t)z]++; z >>= 8; copy2[(uint8_t)z]++; z >>= 8; copy3[z]++; copy0[(uint8_t)w]++; w >>= 8; copy1[(uint8_t)w]++; w >>= 8; copy2[(uint8_t)w]++; w >>= 8; copy3[w]++; z = ((const uint32_t *)(const void *)T_p)[6]; w = ((const uint32_t *)(const void *)T_p)[7]; copy0[(uint8_t)x]++; x >>= 8; copy1[(uint8_t)x]++; x >>= 8; copy2[(uint8_t)x]++; x >>= 8; copy3[x]++; copy0[(uint8_t)y]++; y >>= 8; copy1[(uint8_t)y]++; y >>= 8; copy2[(uint8_t)y]++; y >>= 8; copy3[y]++; x = ((const uint32_t *)(const void *)T_p)[8]; y = ((const uint32_t *)(const void *)T_p)[9]; copy0[(uint8_t)z]++; z >>= 8; copy1[(uint8_t)z]++; z >>= 8; copy2[(uint8_t)z]++; z >>= 8; copy3[z]++; copy0[(uint8_t)w]++; w >>= 8; copy1[(uint8_t)w]++; w >>= 8; copy2[(uint8_t)w]++; w >>= 8; copy3[w]++; z = ((const uint32_t *)(const void *)T_p)[10]; w = ((const uint32_t *)(const void *)T_p)[11]; copy0[(uint8_t)x]++; x >>= 8; copy1[(uint8_t)x]++; x >>= 8; copy2[(uint8_t)x]++; x >>= 8; copy3[x]++; copy0[(uint8_t)y]++; y >>= 8; copy1[(uint8_t)y]++; y >>= 8; copy2[(uint8_t)y]++; y >>= 8; copy3[y]++; x = ((const uint32_t *)(const void *)T_p)[12]; y = ((const uint32_t *)(const void *)T_p)[13]; copy0[(uint8_t)z]++; z >>= 8; copy1[(uint8_t)z]++; z >>= 8; copy2[(uint8_t)z]++; z >>= 8; copy3[z]++; copy0[(uint8_t)w]++; w >>= 8; copy1[(uint8_t)w]++; w >>= 8; copy2[(uint8_t)w]++; w >>= 8; copy3[w]++; z = ((const uint32_t *)(const void *)T_p)[14]; w = ((const uint32_t *)(const void *)T_p)[15]; copy0[(uint8_t)x]++; x >>= 8; copy1[(uint8_t)x]++; x >>= 8; copy2[(uint8_t)x]++; x >>= 
8; copy3[x]++; copy0[(uint8_t)y]++; y >>= 8; copy1[(uint8_t)y]++; y >>= 8; copy2[(uint8_t)y]++; y >>= 8; copy3[y]++; x = ((const uint32_t *)(const void *)T_p)[16]; y = ((const uint32_t *)(const void *)T_p)[17]; copy0[(uint8_t)z]++; z >>= 8; copy1[(uint8_t)z]++; z >>= 8; copy2[(uint8_t)z]++; z >>= 8; copy3[z]++; copy0[(uint8_t)w]++; w >>= 8; copy1[(uint8_t)w]++; w >>= 8; copy2[(uint8_t)w]++; w >>= 8; copy3[w]++; } copy0[(uint8_t)x]++; x >>= 8; copy1[(uint8_t)x]++; x >>= 8; copy2[(uint8_t)x]++; x >>= 8; copy3[x]++; copy0[(uint8_t)y]++; y >>= 8; copy1[(uint8_t)y]++; y >>= 8; copy2[(uint8_t)y]++; y >>= 8; copy3[y]++; T_p += 8; fast_uint_t i; for (i = 0; i < ALPHABET_SIZE; i++) { count[i] += copy0[i] + copy1[i] + copy2[i] + copy3[i]; } } for (; T_p < T + n; T_p += 1) { count[T_p[0]]++; } } static void libsais_unbwt_transpose_bucket2(sa_uint_t * RESTRICT bucket2) { fast_uint_t x, y, c, d; for (x = 0; x != ALPHABET_SIZE; x += 16) { for (c = x; c != x + 16; ++c) { for (d = c + 1; d != x + 16; ++d) { sa_uint_t tmp = bucket2[(d << 8) + c]; bucket2[(d << 8) + c] = bucket2[(c << 8) + d]; bucket2[(c << 8) + d] = tmp; } } for (y = x + 16; y != ALPHABET_SIZE; y += 16) { for (c = x; c != x + 16; ++c) { sa_uint_t * bucket2_yc = &bucket2[(y << 8) + c]; sa_uint_t * bucket2_cy = &bucket2[(c << 8) + y]; sa_uint_t tmp00 = bucket2_yc[ 0 * 256]; bucket2_yc[ 0 * 256] = bucket2_cy[ 0]; bucket2_cy[ 0] = tmp00; sa_uint_t tmp01 = bucket2_yc[ 1 * 256]; bucket2_yc[ 1 * 256] = bucket2_cy[ 1]; bucket2_cy[ 1] = tmp01; sa_uint_t tmp02 = bucket2_yc[ 2 * 256]; bucket2_yc[ 2 * 256] = bucket2_cy[ 2]; bucket2_cy[ 2] = tmp02; sa_uint_t tmp03 = bucket2_yc[ 3 * 256]; bucket2_yc[ 3 * 256] = bucket2_cy[ 3]; bucket2_cy[ 3] = tmp03; sa_uint_t tmp04 = bucket2_yc[ 4 * 256]; bucket2_yc[ 4 * 256] = bucket2_cy[ 4]; bucket2_cy[ 4] = tmp04; sa_uint_t tmp05 = bucket2_yc[ 5 * 256]; bucket2_yc[ 5 * 256] = bucket2_cy[ 5]; bucket2_cy[ 5] = tmp05; sa_uint_t tmp06 = bucket2_yc[ 6 * 256]; bucket2_yc[ 6 * 256] = bucket2_cy[ 6]; bucket2_cy[ 6] = tmp06; sa_uint_t tmp07 = bucket2_yc[ 7 * 256]; bucket2_yc[ 7 * 256] = bucket2_cy[ 7]; bucket2_cy[ 7] = tmp07; sa_uint_t tmp08 = bucket2_yc[ 8 * 256]; bucket2_yc[ 8 * 256] = bucket2_cy[ 8]; bucket2_cy[ 8] = tmp08; sa_uint_t tmp09 = bucket2_yc[ 9 * 256]; bucket2_yc[ 9 * 256] = bucket2_cy[ 9]; bucket2_cy[ 9] = tmp09; sa_uint_t tmp10 = bucket2_yc[10 * 256]; bucket2_yc[10 * 256] = bucket2_cy[10]; bucket2_cy[10] = tmp10; sa_uint_t tmp11 = bucket2_yc[11 * 256]; bucket2_yc[11 * 256] = bucket2_cy[11]; bucket2_cy[11] = tmp11; sa_uint_t tmp12 = bucket2_yc[12 * 256]; bucket2_yc[12 * 256] = bucket2_cy[12]; bucket2_cy[12] = tmp12; sa_uint_t tmp13 = bucket2_yc[13 * 256]; bucket2_yc[13 * 256] = bucket2_cy[13]; bucket2_cy[13] = tmp13; sa_uint_t tmp14 = bucket2_yc[14 * 256]; bucket2_yc[14 * 256] = bucket2_cy[14]; bucket2_cy[14] = tmp14; sa_uint_t tmp15 = bucket2_yc[15 * 256]; bucket2_yc[15 * 256] = bucket2_cy[15]; bucket2_cy[15] = tmp15; } } } } static void libsais_unbwt_compute_bigram_histogram_single(const uint8_t * RESTRICT T, sa_uint_t * RESTRICT bucket1, sa_uint_t * RESTRICT bucket2, fast_uint_t index) { fast_uint_t sum, c; for (sum = 1, c = 0; c < ALPHABET_SIZE; ++c) { fast_uint_t prev = sum; sum += bucket1[c]; bucket1[c] = (sa_uint_t)prev; if (prev != sum) { sa_uint_t * RESTRICT bucket2_p = &bucket2[c << 8]; { fast_uint_t hi = index; if (sum < hi) { hi = sum; } libsais_unbwt_compute_histogram(&T[prev], (fast_sint_t)(hi - prev), bucket2_p); } { fast_uint_t lo = index + 1; if (prev > lo) { lo = prev; } 
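/* positions at or beyond the primary index read T shifted back by one position */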
libsais_unbwt_compute_histogram(&T[lo - 1], (fast_sint_t)(sum - lo), bucket2_p); } } } libsais_unbwt_transpose_bucket2(bucket2); } static void libsais_unbwt_calculate_fastbits(sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t lastc, fast_uint_t shift) { fast_uint_t v, w, sum, c, d; for (v = 0, w = 0, sum = 1, c = 0; c < ALPHABET_SIZE; ++c) { if (c == lastc) { sum += 1; } for (d = 0; d < ALPHABET_SIZE; ++d, ++w) { fast_uint_t prev = sum; sum += bucket2[w]; bucket2[w] = (sa_uint_t)prev; if (prev != sum) { for (; v <= ((sum - 1) >> shift); ++v) { fastbits[v] = (uint16_t)w; } } } } } static void libsais_unbwt_calculate_biPSI(const uint8_t * RESTRICT T, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket1, sa_uint_t * RESTRICT bucket2, fast_uint_t index, fast_sint_t omp_block_start, fast_sint_t omp_block_end) { { fast_sint_t i = omp_block_start, j = (fast_sint_t)index; if (omp_block_end < j) { j = omp_block_end; } for (; i < j; ++i) { fast_uint_t c = T[i]; fast_uint_t p = bucket1[c]++; fast_sint_t t = (fast_sint_t)(index - p); if (t != 0) { fast_uint_t w = (((fast_uint_t)T[p + (fast_uint_t)(t >> ((sizeof(fast_sint_t) * 8) - 1))]) << 8) + c; P[bucket2[w]++] = (sa_uint_t)i; } } } { fast_sint_t i = (fast_sint_t)index, j = omp_block_end; if (omp_block_start > i) { i = omp_block_start; } for (i += 1; i <= j; ++i) { fast_uint_t c = T[i - 1]; fast_uint_t p = bucket1[c]++; fast_sint_t t = (fast_sint_t)(index - p); if (t != 0) { fast_uint_t w = (((fast_uint_t)T[p + (fast_uint_t)(t >> ((sizeof(fast_sint_t) * 8) - 1))]) << 8) + c; P[bucket2[w]++] = (sa_uint_t)i; } } } } static void libsais_unbwt_init_single(const uint8_t * RESTRICT T, sa_uint_t * RESTRICT P, sa_sint_t n, const sa_sint_t * freq, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits) { sa_uint_t bucket1[ALPHABET_SIZE]; fast_uint_t index = I[0]; fast_uint_t lastc = T[0]; fast_uint_t shift = 0; while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; } if (freq != NULL) { memcpy(bucket1, freq, ALPHABET_SIZE * sizeof(sa_uint_t)); } else { memset(bucket1, 0, ALPHABET_SIZE * sizeof(sa_uint_t)); libsais_unbwt_compute_histogram(T, n, bucket1); } memset(bucket2, 0, ALPHABET_SIZE * ALPHABET_SIZE * sizeof(sa_uint_t)); libsais_unbwt_compute_bigram_histogram_single(T, bucket1, bucket2, index); libsais_unbwt_calculate_fastbits(bucket2, fastbits, lastc, shift); libsais_unbwt_calculate_biPSI(T, P, bucket1, bucket2, index, 0, n); } #if defined(_OPENMP) static void libsais_unbwt_compute_bigram_histogram_parallel(const uint8_t * RESTRICT T, fast_uint_t index, sa_uint_t * RESTRICT bucket1, sa_uint_t * RESTRICT bucket2, fast_sint_t omp_block_start, fast_sint_t omp_block_size) { fast_sint_t i; for (i = omp_block_start; i < omp_block_start + omp_block_size; ++i) { fast_uint_t c = T[i]; fast_uint_t p = bucket1[c]++; fast_sint_t t = (fast_sint_t)(index - p); if (t != 0) { fast_uint_t w = (((fast_uint_t)T[p + (fast_uint_t)(t >> ((sizeof(fast_sint_t) * 8) - 1))]) << 8) + c; bucket2[w]++; } } } static void libsais_unbwt_init_parallel(const uint8_t * RESTRICT T, sa_uint_t * RESTRICT P, sa_sint_t n, const sa_sint_t * freq, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, sa_uint_t * RESTRICT buckets, sa_sint_t threads) { sa_uint_t bucket1[ALPHABET_SIZE]; fast_uint_t index = I[0]; fast_uint_t lastc = T[0]; fast_uint_t shift = 0; while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; } memset(bucket1, 0, ALPHABET_SIZE * sizeof(sa_uint_t)); memset(bucket2, 0, ALPHABET_SIZE * 
ALPHABET_SIZE * sizeof(sa_uint_t)); #pragma omp parallel num_threads(threads) if(threads > 1 && n >= 65536) { fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); if (omp_num_threads == 1) { libsais_unbwt_init_single(T, P, n, freq, I, bucket2, fastbits); } else { sa_uint_t * RESTRICT bucket1_local = buckets + omp_thread_num * (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)); sa_uint_t * RESTRICT bucket2_local = bucket1_local + ALPHABET_SIZE; fast_sint_t omp_block_stride = (n / omp_num_threads) & (-16); fast_sint_t omp_block_start = omp_thread_num * omp_block_stride; fast_sint_t omp_block_size = omp_thread_num < omp_num_threads - 1 ? omp_block_stride : n - omp_block_start; { memset(bucket1_local, 0, ALPHABET_SIZE * sizeof(sa_uint_t)); libsais_unbwt_compute_histogram(T + omp_block_start, omp_block_size, bucket1_local); } #pragma omp barrier #pragma omp master { { sa_uint_t * RESTRICT bucket1_temp = buckets; fast_sint_t t; for (t = 0; t < omp_num_threads; ++t, bucket1_temp += ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)) { fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_uint_t A = bucket1[c], B = bucket1_temp[c]; bucket1[c] = A + B; bucket1_temp[c] = A; } } } { fast_uint_t sum, c; for (sum = 1, c = 0; c < ALPHABET_SIZE; ++c) { fast_uint_t prev = sum; sum += bucket1[c]; bucket1[c] = (sa_uint_t)prev; } } } #pragma omp barrier { fast_sint_t c; for (c = 0; c < ALPHABET_SIZE; c += 1) { sa_uint_t A = bucket1[c], B = bucket1_local[c]; bucket1_local[c] = A + B; } memset(bucket2_local, 0, ALPHABET_SIZE * ALPHABET_SIZE * sizeof(sa_uint_t)); libsais_unbwt_compute_bigram_histogram_parallel(T, index, bucket1_local, bucket2_local, omp_block_start, omp_block_size); } #pragma omp barrier { fast_sint_t omp_bucket2_stride = ((ALPHABET_SIZE * ALPHABET_SIZE) / omp_num_threads) & (-16); fast_sint_t omp_bucket2_start = omp_thread_num * omp_bucket2_stride; fast_sint_t omp_bucket2_size = omp_thread_num < omp_num_threads - 1 ? 
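/* each thread folds the per-thread bigram counts into the shared bucket2 over its own slice of the 256*256 table; the last thread takes the tail */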
omp_bucket2_stride : (ALPHABET_SIZE * ALPHABET_SIZE) - omp_bucket2_start; sa_uint_t * RESTRICT bucket2_temp = buckets + ALPHABET_SIZE; fast_sint_t t; for (t = 0; t < omp_num_threads; ++t, bucket2_temp += ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)) { fast_sint_t c; for (c = omp_bucket2_start; c < omp_bucket2_start + omp_bucket2_size; c += 1) { sa_uint_t A = bucket2[c], B = bucket2_temp[c]; bucket2[c] = A + B; bucket2_temp[c] = A; } } } #pragma omp barrier #pragma omp master { libsais_unbwt_calculate_fastbits(bucket2, fastbits, lastc, shift); { fast_sint_t t; for (t = omp_num_threads - 1; t >= 1; --t) { sa_uint_t * RESTRICT dst_bucket1 = buckets + t * (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)); sa_uint_t * RESTRICT src_bucket1 = dst_bucket1 - (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)); memcpy(dst_bucket1, src_bucket1, ALPHABET_SIZE * sizeof(sa_uint_t)); } memcpy(buckets, bucket1, ALPHABET_SIZE * sizeof(sa_uint_t)); } } #pragma omp barrier { fast_sint_t c; for (c = 0; c < ALPHABET_SIZE * ALPHABET_SIZE; c += 1) { sa_uint_t A = bucket2[c], B = bucket2_local[c]; bucket2_local[c] = A + B; } libsais_unbwt_calculate_biPSI(T, P, bucket1_local, bucket2_local, index, omp_block_start, omp_block_start + omp_block_size); } #pragma omp barrier #pragma omp master { memcpy(bucket2, buckets + ALPHABET_SIZE + (omp_num_threads - 1) * (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)), ALPHABET_SIZE * ALPHABET_SIZE * sizeof(sa_uint_t)); } } } } #endif static void libsais_unbwt_decode_1(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t * i0, fast_uint_t k) { uint16_t * RESTRICT U0 = (uint16_t *)(void *)U; fast_uint_t i, p0 = *i0; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0); } *i0 = p0; } static void libsais_unbwt_decode_2(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t k) { uint16_t * RESTRICT U0 = (uint16_t *)(void *)U; uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r); fast_uint_t i, p0 = *i0, p1 = *i1; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0); uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1); } *i0 = p0; *i1 = p1; } static void libsais_unbwt_decode_3(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t k) { uint16_t * RESTRICT U0 = (uint16_t *)(void *)U; uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r); uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r); fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0); uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1); uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; 
U2[i] = libsais_bswap16(c2); } *i0 = p0; *i1 = p1; *i2 = p2; } static void libsais_unbwt_decode_4(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t k) { uint16_t * RESTRICT U0 = (uint16_t *)(void *)U; uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r); uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r); uint16_t * RESTRICT U3 = (uint16_t *)(void *)(((uint8_t *)U2) + r); fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0); uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1); uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = libsais_bswap16(c2); uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = libsais_bswap16(c3); } *i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; } static void libsais_unbwt_decode_5(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t k) { uint16_t * RESTRICT U0 = (uint16_t *)(void *)U; uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r); uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r); uint16_t * RESTRICT U3 = (uint16_t *)(void *)(((uint8_t *)U2) + r); uint16_t * RESTRICT U4 = (uint16_t *)(void *)(((uint8_t *)U3) + r); fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0); uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1); uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = libsais_bswap16(c2); uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = libsais_bswap16(c3); uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = libsais_bswap16(c4); } *i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4; } static void libsais_unbwt_decode_6(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t * i5, fast_uint_t k) { uint16_t * RESTRICT U0 = (uint16_t *)(void *)U; uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r); uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r); uint16_t * RESTRICT U3 = (uint16_t *)(void *)(((uint8_t *)U2) + r); uint16_t * RESTRICT U4 = (uint16_t *)(void *)(((uint8_t *)U3) + r); uint16_t * RESTRICT U5 = (uint16_t *)(void *)(((uint8_t *)U4) + r); fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4, p5 = *i5; for (i = 0; i != k; ++i) { 
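/* for each interleaved stream: fastbits gives a starting guess for the bigram bucket, a short forward scan over bucket2 finishes the lookup, the two bytes are written with a single byte-swapped 16-bit store, and the position then follows the P (biPSI) link */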
uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0); uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1); uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = libsais_bswap16(c2); uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = libsais_bswap16(c3); uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = libsais_bswap16(c4); uint16_t c5 = fastbits[p5 >> shift]; if (bucket2[c5] <= p5) { do { c5++; } while (bucket2[c5] <= p5); } p5 = P[p5]; U5[i] = libsais_bswap16(c5); } *i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4; *i5 = p5; } static void libsais_unbwt_decode_7(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t * i5, fast_uint_t * i6, fast_uint_t k) { uint16_t * RESTRICT U0 = (uint16_t *)(void *)U; uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r); uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r); uint16_t * RESTRICT U3 = (uint16_t *)(void *)(((uint8_t *)U2) + r); uint16_t * RESTRICT U4 = (uint16_t *)(void *)(((uint8_t *)U3) + r); uint16_t * RESTRICT U5 = (uint16_t *)(void *)(((uint8_t *)U4) + r); uint16_t * RESTRICT U6 = (uint16_t *)(void *)(((uint8_t *)U5) + r); fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4, p5 = *i5, p6 = *i6; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0); uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1); uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = libsais_bswap16(c2); uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = libsais_bswap16(c3); uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = libsais_bswap16(c4); uint16_t c5 = fastbits[p5 >> shift]; if (bucket2[c5] <= p5) { do { c5++; } while (bucket2[c5] <= p5); } p5 = P[p5]; U5[i] = libsais_bswap16(c5); uint16_t c6 = fastbits[p6 >> shift]; if (bucket2[c6] <= p6) { do { c6++; } while (bucket2[c6] <= p6); } p6 = P[p6]; U6[i] = libsais_bswap16(c6); } *i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4; *i5 = p5; *i6 = p6; } static void libsais_unbwt_decode_8(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_uint_t shift, fast_uint_t r, fast_uint_t * i0, fast_uint_t * i1, fast_uint_t * i2, fast_uint_t * i3, fast_uint_t * i4, fast_uint_t * i5, fast_uint_t * i6, fast_uint_t * i7, fast_uint_t k) { uint16_t * RESTRICT U0 = (uint16_t *)(void *)U; uint16_t * RESTRICT U1 = (uint16_t *)(void *)(((uint8_t *)U0) + r); uint16_t * RESTRICT U2 = (uint16_t *)(void *)(((uint8_t *)U1) + r); uint16_t * RESTRICT U3 = (uint16_t *)(void *)(((uint8_t *)U2) + r); uint16_t * RESTRICT U4 = (uint16_t *)(void *)(((uint8_t 
*)U3) + r); uint16_t * RESTRICT U5 = (uint16_t *)(void *)(((uint8_t *)U4) + r); uint16_t * RESTRICT U6 = (uint16_t *)(void *)(((uint8_t *)U5) + r); uint16_t * RESTRICT U7 = (uint16_t *)(void *)(((uint8_t *)U6) + r); fast_uint_t i, p0 = *i0, p1 = *i1, p2 = *i2, p3 = *i3, p4 = *i4, p5 = *i5, p6 = *i6, p7 = *i7; for (i = 0; i != k; ++i) { uint16_t c0 = fastbits[p0 >> shift]; if (bucket2[c0] <= p0) { do { c0++; } while (bucket2[c0] <= p0); } p0 = P[p0]; U0[i] = libsais_bswap16(c0); uint16_t c1 = fastbits[p1 >> shift]; if (bucket2[c1] <= p1) { do { c1++; } while (bucket2[c1] <= p1); } p1 = P[p1]; U1[i] = libsais_bswap16(c1); uint16_t c2 = fastbits[p2 >> shift]; if (bucket2[c2] <= p2) { do { c2++; } while (bucket2[c2] <= p2); } p2 = P[p2]; U2[i] = libsais_bswap16(c2); uint16_t c3 = fastbits[p3 >> shift]; if (bucket2[c3] <= p3) { do { c3++; } while (bucket2[c3] <= p3); } p3 = P[p3]; U3[i] = libsais_bswap16(c3); uint16_t c4 = fastbits[p4 >> shift]; if (bucket2[c4] <= p4) { do { c4++; } while (bucket2[c4] <= p4); } p4 = P[p4]; U4[i] = libsais_bswap16(c4); uint16_t c5 = fastbits[p5 >> shift]; if (bucket2[c5] <= p5) { do { c5++; } while (bucket2[c5] <= p5); } p5 = P[p5]; U5[i] = libsais_bswap16(c5); uint16_t c6 = fastbits[p6 >> shift]; if (bucket2[c6] <= p6) { do { c6++; } while (bucket2[c6] <= p6); } p6 = P[p6]; U6[i] = libsais_bswap16(c6); uint16_t c7 = fastbits[p7 >> shift]; if (bucket2[c7] <= p7) { do { c7++; } while (bucket2[c7] <= p7); } p7 = P[p7]; U7[i] = libsais_bswap16(c7); } *i0 = p0; *i1 = p1; *i2 = p2; *i3 = p3; *i4 = p4; *i5 = p5; *i6 = p6; *i7 = p7; } static void libsais_unbwt_decode(uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_sint_t n, sa_sint_t r, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, fast_sint_t blocks, fast_uint_t reminder) { fast_uint_t shift = 0; while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; } fast_uint_t offset = 0; while (blocks > 8) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5], i6 = I[6], i7 = I[7]; libsais_unbwt_decode_8(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, &i7, (fast_uint_t)r >> 1); I += 8; blocks -= 8; offset += 8 * (fast_uint_t)r; } if (blocks == 1) { fast_uint_t i0 = I[0]; libsais_unbwt_decode_1(U + offset, P, bucket2, fastbits, shift, &i0, reminder >> 1); } else if (blocks == 2) { fast_uint_t i0 = I[0], i1 = I[1]; libsais_unbwt_decode_2(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, reminder >> 1); libsais_unbwt_decode_1(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, &i0, ((fast_uint_t)r >> 1) - (reminder >> 1)); } else if (blocks == 3) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2]; libsais_unbwt_decode_3(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, reminder >> 1); libsais_unbwt_decode_2(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, ((fast_uint_t)r >> 1) - (reminder >> 1)); } else if (blocks == 4) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3]; libsais_unbwt_decode_4(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, reminder >> 1); libsais_unbwt_decode_3(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, ((fast_uint_t)r >> 1) - (reminder >> 1)); } else if (blocks == 5) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4]; libsais_unbwt_decode_5(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, 
&i4, reminder >> 1); libsais_unbwt_decode_4(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, ((fast_uint_t)r >> 1) - (reminder >> 1)); } else if (blocks == 6) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5]; libsais_unbwt_decode_6(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, reminder >> 1); libsais_unbwt_decode_5(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, ((fast_uint_t)r >> 1) - (reminder >> 1)); } else if (blocks == 7) { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5], i6 = I[6]; libsais_unbwt_decode_7(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, reminder >> 1); libsais_unbwt_decode_6(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, ((fast_uint_t)r >> 1) - (reminder >> 1)); } else { fast_uint_t i0 = I[0], i1 = I[1], i2 = I[2], i3 = I[3], i4 = I[4], i5 = I[5], i6 = I[6], i7 = I[7]; libsais_unbwt_decode_8(U + offset, P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, &i7, reminder >> 1); libsais_unbwt_decode_7(U + offset + 2 * (reminder >> 1), P, bucket2, fastbits, shift, (fast_uint_t)r, &i0, &i1, &i2, &i3, &i4, &i5, &i6, ((fast_uint_t)r >> 1) - (reminder >> 1)); } } static void libsais_unbwt_decode_omp(const uint8_t * RESTRICT T, uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_sint_t n, sa_sint_t r, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, sa_sint_t threads) { fast_uint_t lastc = T[0]; fast_sint_t blocks = 1 + (((fast_sint_t)n - 1) / (fast_sint_t)r); fast_uint_t reminder = (fast_uint_t)n - ((fast_uint_t)r * ((fast_uint_t)blocks - 1)); #if defined(_OPENMP) fast_sint_t max_threads = blocks < threads ? blocks : threads; #pragma omp parallel num_threads(max_threads) if(max_threads > 1 && n >= 65536) #endif { #if defined(_OPENMP) fast_sint_t omp_thread_num = omp_get_thread_num(); fast_sint_t omp_num_threads = omp_get_num_threads(); #else UNUSED(threads); fast_sint_t omp_thread_num = 0; fast_sint_t omp_num_threads = 1; #endif fast_sint_t omp_block_stride = blocks / omp_num_threads; fast_sint_t omp_block_reminder = blocks % omp_num_threads; fast_sint_t omp_block_size = omp_block_stride + (omp_thread_num < omp_block_reminder); fast_sint_t omp_block_start = omp_block_stride * omp_thread_num + (omp_thread_num < omp_block_reminder ? omp_thread_num : omp_block_reminder); libsais_unbwt_decode(U + r * omp_block_start, P, n, r, I + omp_block_start, bucket2, fastbits, omp_block_size, omp_thread_num < omp_num_threads - 1 ? 
(fast_uint_t)r : reminder); } U[n - 1] = (uint8_t)lastc; } static sa_sint_t libsais_unbwt_core(const uint8_t * RESTRICT T, uint8_t * RESTRICT U, sa_uint_t * RESTRICT P, sa_sint_t n, const sa_sint_t * freq, sa_sint_t r, const sa_uint_t * RESTRICT I, sa_uint_t * RESTRICT bucket2, uint16_t * RESTRICT fastbits, sa_uint_t * RESTRICT buckets, sa_sint_t threads) { #if defined(_OPENMP) if (threads > 1 && n >= 262144) { libsais_unbwt_init_parallel(T, P, n, freq, I, bucket2, fastbits, buckets, threads); } else #else UNUSED(buckets); #endif { libsais_unbwt_init_single(T, P, n, freq, I, bucket2, fastbits); } libsais_unbwt_decode_omp(T, U, P, n, r, I, bucket2, fastbits, threads); return 0; } static sa_sint_t libsais_unbwt_main(const uint8_t * T, uint8_t * U, sa_uint_t * P, sa_sint_t n, const sa_sint_t * freq, sa_sint_t r, const sa_uint_t * I, sa_sint_t threads) { fast_uint_t shift = 0; while ((n >> shift) > (1 << UNBWT_FASTBITS)) { shift++; } sa_uint_t * RESTRICT bucket2 = (sa_uint_t *)libsais_alloc_aligned(ALPHABET_SIZE * ALPHABET_SIZE * sizeof(sa_uint_t), 4096); uint16_t * RESTRICT fastbits = (uint16_t *)libsais_alloc_aligned(((size_t)1 + (size_t)(n >> shift)) * sizeof(uint16_t), 4096); sa_uint_t * RESTRICT buckets = threads > 1 && n >= 262144 ? (sa_uint_t *)libsais_alloc_aligned((size_t)threads * (ALPHABET_SIZE + (ALPHABET_SIZE * ALPHABET_SIZE)) * sizeof(sa_uint_t), 4096) : NULL; sa_sint_t index = bucket2 != NULL && fastbits != NULL && (buckets != NULL || threads == 1 || n < 262144) ? libsais_unbwt_core(T, U, P, n, freq, r, I, bucket2, fastbits, buckets, threads) : -2; libsais_free_aligned(buckets); libsais_free_aligned(fastbits); libsais_free_aligned(bucket2); return index; } static sa_sint_t libsais_unbwt_main_ctx(const LIBSAIS_UNBWT_CONTEXT * ctx, const uint8_t * T, uint8_t * U, sa_uint_t * P, sa_sint_t n, const sa_sint_t * freq, sa_sint_t r, const sa_uint_t * I) { return ctx != NULL && ctx->bucket2 != NULL && ctx->fastbits != NULL && (ctx->buckets != NULL || ctx->threads == 1) ? 
libsais_unbwt_core(T, U, P, n, freq, r, I, ctx->bucket2, ctx->fastbits, ctx->buckets, (sa_sint_t)ctx->threads) : -2; } void * libsais_unbwt_create_ctx(void) { return (void *)libsais_unbwt_create_ctx_main(1); } void libsais_unbwt_free_ctx(void * ctx) { libsais_unbwt_free_ctx_main((LIBSAIS_UNBWT_CONTEXT *)ctx); } int32_t libsais_unbwt(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t i) { return libsais_unbwt_aux(T, U, A, n, freq, n, &i); } int32_t libsais_unbwt_ctx(const void * ctx, const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t i) { return libsais_unbwt_aux_ctx(ctx, T, U, A, n, freq, n, &i); } int32_t libsais_unbwt_aux(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t r, const int32_t * I) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || ((r != n) && ((r < 2) || ((r & (r - 1)) != 0))) || (I == NULL)) { return -1; } else if (n <= 1) { if (I[0] != n) { return -1; } if (n == 1) { U[0] = T[0]; } return 0; } fast_sint_t t; for (t = 0; t <= (n - 1) / r; ++t) { if (I[t] <= 0 || I[t] > n) { return -1; } } return libsais_unbwt_main(T, U, (sa_uint_t *)A, n, freq, r, (const sa_uint_t *)I, 1); } int32_t libsais_unbwt_aux_ctx(const void * ctx, const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t r, const int32_t * I) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || ((r != n) && ((r < 2) || ((r & (r - 1)) != 0))) || (I == NULL)) { return -1; } else if (n <= 1) { if (I[0] != n) { return -1; } if (n == 1) { U[0] = T[0]; } return 0; } fast_sint_t t; for (t = 0; t <= (n - 1) / r; ++t) { if (I[t] <= 0 || I[t] > n) { return -1; } } return libsais_unbwt_main_ctx((const LIBSAIS_UNBWT_CONTEXT *)ctx, T, U, (sa_uint_t *)A, n, freq, r, (const sa_uint_t *)I); } #if defined(_OPENMP) void * libsais_unbwt_create_ctx_omp(int32_t threads) { if (threads < 0) { return NULL; } threads = threads > 0 ? threads : omp_get_max_threads(); return (void *)libsais_unbwt_create_ctx_main(threads); } int32_t libsais_unbwt_omp(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t i, int32_t threads) { return libsais_unbwt_aux_omp(T, U, A, n, freq, n, &i, threads); } int32_t libsais_unbwt_aux_omp(const uint8_t * T, uint8_t * U, int32_t * A, int32_t n, const int32_t * freq, int32_t r, const int32_t * I, int32_t threads) { if ((T == NULL) || (U == NULL) || (A == NULL) || (n < 0) || ((r != n) && ((r < 2) || ((r & (r - 1)) != 0))) || (I == NULL) || (threads < 0)) { return -1; } else if (n <= 1) { if (I[0] != n) { return -1; } if (n == 1) { U[0] = T[0]; } return 0; } fast_sint_t t; for (t = 0; t <= (n - 1) / r; ++t) { if (I[t] <= 0 || I[t] > n) { return -1; } } threads = threads > 0 ? threads : omp_get_max_threads(); return libsais_unbwt_main(T, U, (sa_uint_t *)A, n, freq, r, (const sa_uint_t *)I, threads); } #endif
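/*
 * Hedged usage sketch (not part of libsais itself): given a BWT of n bytes and
 * its primary index, reconstruct the original text with the single-threaded
 * entry point defined above. The helper name `demo_unbwt` is made up for this
 * example, freq == NULL lets the library count symbol frequencies itself, and
 * the temporary array is sized as n + 1 int32_t entries here (an assumption;
 * check the upstream documentation for the exact requirement).
 */
#include <stdint.h>
#include <stdlib.h>

static int32_t demo_unbwt(const uint8_t * bwt, uint8_t * out, int32_t n, int32_t primary_index)
{
    int32_t * tmp = (int32_t *)malloc(((size_t)n + 1) * sizeof(int32_t));
    if (tmp == NULL) { return -2; }

    /* returns 0 on success, -1 on invalid input, -2 if an internal allocation fails */
    int32_t rc = libsais_unbwt(bwt, out, tmp, n, NULL, primary_index);

    free(tmp);
    return rc;
}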
test_core.c
/* * RELIC is an Efficient LIbrary for Cryptography * Copyright (C) 2007-2017 RELIC Authors * * This file is part of RELIC. RELIC is legal property of its developers, * whose names are not listed here. Please refer to the COPYRIGHT file * for contact information. * * RELIC is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * RELIC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with RELIC. If not, see <http://www.gnu.org/licenses/>. */ /** * @file * * Tests for configuration management. * * @ingroup test */ #include <stdio.h> #include <relic.h> #include <relic_test.h> #if MULTI == PTHREAD void *master(void *ptr) { int *code = (int *)ptr; core_init(); THROW(ERR_NO_MEMORY); if (err_get_code() != STS_ERR) { *code = STS_ERR; } else { *code = STS_OK; } core_clean(); return NULL; } void *tester(void *ptr) { int *code = (int *)ptr; core_init(); if (err_get_code() != STS_OK) { *code = STS_ERR; } else { *code = STS_OK; } core_clean(); return NULL; } #endif int main(void) { int code = STS_ERR; /* Initialize library with default configuration. */ if (core_init() != STS_OK) { core_clean(); return 1; } util_banner("Tests for the CORE module:\n", 0); TEST_ONCE("the library context is consistent") { TEST_ASSERT(core_get() != NULL, end); } TEST_END; TEST_ONCE("switching the library context is correct") { ctx_t new_ctx, *old_ctx; /* Backup the old context. */ old_ctx = core_get(); /* Switch the library context. */ core_set(&new_ctx); /* Reinitialize library with new context. */ core_init(); /* Run function to manipulate the library context. */ THROW(ERR_NO_MEMORY); core_set(old_ctx); TEST_ASSERT(err_get_code() == STS_OK, end); core_set(&new_ctx); TEST_ASSERT(err_get_code() == STS_ERR, end); /* Now we need to finalize the new context. */ core_clean(); /* And restore the original context. */ core_set(old_ctx); } TEST_END; code = STS_OK; #if MULTI == OPENMP TEST_ONCE("library context is thread-safe") { omp_set_num_threads(CORES); #pragma omp parallel shared(code) { if (omp_get_thread_num() == 0) { THROW(ERR_NO_MEMORY); if (err_get_code() != STS_ERR) { code = STS_ERR; } } else { core_init(); if (err_get_code() != STS_OK) { code = STS_ERR; } core_clean(); } } TEST_ASSERT(code == STS_OK, end); } TEST_END; #endif #if MULTI == PTHREAD TEST_ONCE("library context is thread-safe") { pthread_t thread[CORES]; int result[CORES] = { STS_OK }; for (int i = 0; i < CORES; i++) { if (i == 0) { if (pthread_create(&(thread[0]), NULL, master, &(result[0]))) { code = STS_ERR; } } else { if (pthread_create(&(thread[i]), NULL, tester, &(result[i]))) { code = STS_ERR; } } if (result[i] != STS_OK) { code = STS_ERR; } } for (int i = 0; i < CORES; i++) { if (pthread_join(thread[i], NULL)) { code = STS_ERR; } } TEST_ASSERT(code == STS_OK, end); } TEST_END; #endif util_banner("All tests have passed.\n", 0); end: core_clean(); return code; }
DRB066-pointernoaliasing-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: [email protected], [email protected], [email protected], [email protected], [email protected]) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Freshly allocated pointers do not alias to each other. */ #include "omprace.h" #include <omp.h> #include <stdlib.h> void setup(int N) { double * m_pdv_sum = (double* ) malloc (sizeof (double) * N ); double * m_nvol = (double* ) malloc (sizeof (double) * N ); #pragma omp parallel for schedule(static) for (int i=0; i < N; ++i ) { m_pdv_sum[ i ] = 0.0; m_nvol[ i ] = i*2.5; } free(m_pdv_sum); free(m_nvol); } int main() { omprace_init(); int N =1000; setup(N); omprace_fini(); return 0; }
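/*
 * Illustrative variant (not part of the DataRaceBench kernel above): the two
 * buffers come from separate malloc calls, so they cannot alias, and that fact
 * can also be stated explicitly with C99 `restrict`, which may help compilers
 * and analysis tools. The function name is made up for this sketch.
 */
#include <stdlib.h>

static void setup_restrict(int N) {
  double * restrict m_pdv_sum = (double *) malloc(sizeof(double) * N);
  double * restrict m_nvol    = (double *) malloc(sizeof(double) * N);

#pragma omp parallel for schedule(static)
  for (int i = 0; i < N; ++i) {
    m_pdv_sum[i] = 0.0;       /* each iteration touches distinct elements */
    m_nvol[i]    = i * 2.5;   /* and the two arrays never overlap */
  }

  free(m_pdv_sum);
  free(m_nvol);
}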
GB_unop__log2_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__log2_fp32_fp32 // op(A') function: GB_unop_tran__log2_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = log2f (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = log2f (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = log2f (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOG2 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__log2_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = log2f (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = aij ; Cx [p] = log2f (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__log2_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
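/*
 * Standalone sketch of the pattern the generated kernel above implements (not
 * a GraphBLAS API): apply z = log2f(x) over an array in parallel, skipping
 * entries whose bitmap bit is cleared. All names here are illustrative only.
 */
#include <math.h>
#include <stdint.h>

static void apply_log2_bitmap(float *Cx, const float *Ax, const int8_t *Ab,
                              int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   /* entry not present in the bitmap */
        Cx [p] = log2f (Ax [p]) ;
    }
}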
SharedHeap.h
#ifndef SHARED_HEAP_H_
#define SHARED_HEAP_H_

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "CommonParallel.h"

#ifdef __UPC__
#include <upc.h>
#elif defined _OPENMP
#include <omp.h>
#elif defined USE_MPI
#include <mpi.h>
#else
#warning "No parallelism has been chosen at compile time... did you want OpenMP (cc -fopenmp), MPI (mpicc -DUSE_MPI) or UPC (upcc)?"
#endif

#if defined (__cplusplus)
extern "C" {
#endif

#ifndef MEM_ALIGN_SIZE
#define MEM_ALIGN_SIZE 8
#endif

// round up to nearest word
#define ALIGNED_MEM_SIZE(s) ( ((size_t) s + (MEM_ALIGN_SIZE - 1)) & ~((size_t) (MEM_ALIGN_SIZE - 1)) )

#ifdef __UPC__
// UPC

#include <upc.h>
#include "upc_compatiblity.h"

#define ATOMIC_FETCHADD UPC_ATOMIC_FADD_I64
#define ATOMIC_CSWAP UPC_ATOMIC_CSWAP_I64

typedef struct {
    size_t size, idx;
    // data of length size is assumed immediately after this data structure
} _SharedHeap;
typedef shared _SharedHeap *SharedHeap;
typedef shared char * SharedPtr;

static SharedPtr __alloc_from_SharedHeap(SharedHeap sharedHeap, size_t size) {
    size = ALIGNED_MEM_SIZE(size);
    size_t idx = ATOMIC_FETCHADD(&(sharedHeap->idx), size);
    if (idx + size > sharedHeap->size)
        DIE("Thread %d: attempt to allocate past size of SharedHeap (%lld of %lld)\n",
            MYTHREAD, (long long) idx, (long long) sharedHeap->size);
    SharedPtr data = (SharedPtr) sharedHeap;
    return data + idx + ALIGNED_MEM_SIZE( sizeof(_SharedHeap) );
}

/* only MYTHREAD == rank will allocate the blocks of memory */
#define INIT_SHARED_HEAP(sharedHeap, blocks, bytes, rank) \
    SharedHeap sharedHeap = NULL; \
    if (MYTHREAD == rank) { \
        size_t align_bytes = ALIGNED_MEM_SIZE(bytes); \
        sharedHeap = (SharedHeap) upc_alloc(blocks * align_bytes + ALIGNED_MEM_SIZE( sizeof(_SharedHeap) ) ); \
        if (sharedHeap == NULL) DIE("Thread %d: could not allocate %ld bytes for SharedHeap\n", MYTHREAD, blocks * align_bytes); \
        sharedHeap->size = blocks * align_bytes; \
        sharedHeap->idx = 0; \
        upc_fence; \
    } while (0)

#define FREE_SHARED_HEAP(sharedHeap) { upc_free(sharedHeap); sharedHeap = NULL; } while(0)

#define ALLOC_FROM_SHARED_HEAP(sharedHeap, type, varname, count) shared type *varname = (shared type *) __alloc_from_SharedHeap(sharedHeap, sizeof(type) * count)

#define SHARED_MEMGET(dst, src, size) upc_memget(dst, src, size)
#define SHARED_MEMPUT(dst, src, size) upc_memput(dst, src, size)

#else // NOT UPC

#ifdef MPI_VERSION
// MPI, ensure mpi.h is loaded
#include <mpi.h>

typedef struct {
    size_t size, idx;
    MPI_Win win;
    int rank;
    // data is assumed to be immediately following this data structure
} _SharedHeap;
typedef _SharedHeap *SharedHeap;

typedef struct {
    SharedHeap heap;
    size_t offset;
} SharedPtr;

static size_t __shared_atomic_fetchadd(SharedPtr ptr, size_t val) {
    size_t oldVal;
    CHECK_MPI( MPI_Win_lock( MPI_LOCK_EXCLUSIVE, ptr.heap->rank, 0, ptr.heap->win) );
    CHECK_MPI( MPI_Fetch_and_op( &val, &oldVal, MPI_LONG_LONG_INT, ptr.heap->rank, ptr.offset, MPI_SUM, ptr.heap->win ) );
    CHECK_MPI( MPI_Win_unlock( ptr.heap->rank, ptr.heap->win ) );
    return oldVal;
}
#define ATOMIC_FETCHADD __shared_atomic_fetchadd
#define ATOMIC_CSWAP TODO

static SharedPtr __alloc_from_SharedHeap(SharedHeap sharedHeap, size_t size) {
    assert(sharedHeap);
    size = ALIGNED_MEM_SIZE(size);
    SharedPtr ptr;
    ptr.heap = sharedHeap;
    ptr.offset = ((char *) &(sharedHeap->idx)) - ((char *) sharedHeap);
    ptr.offset = ATOMIC_FETCHADD(ptr, size);
    if (ptr.offset + size > sharedHeap->size)
        DIE("Could not allocate %ld bytes from SharedHeap (on thread %d)\n", size, sharedHeap->rank);
    return ptr;
}

static void __shared_memput(SharedPtr dst, char *src, size_t size) {
    CHECK_MPI( MPI_Put(src, size, MPI_BYTE, dst.heap->rank, dst.offset, size, MPI_BYTE, dst.heap->win) );
    CHECK_MPI( MPI_Win_flush( dst.heap->rank, dst.heap->win ) );
}

static void __shared_memget(char *dst, SharedPtr src, size_t size) {
    CHECK_MPI( MPI_Get(dst, size, MPI_BYTE, src.heap->rank, src.offset, size, MPI_BYTE, src.heap->win) );
}

/* only MYTHREAD == rank will allocate the blocks of memory */
#define INIT_SHARED_HEAP(sharedHeap, blocks, bytes, rank) \
    SharedHeap sharedHeap; \
    { \
        size_t align_bytes = blocks * ALIGNED_MEM_SIZE(bytes); \
        size_t alloc_bytes = (MYTHREAD == rank ? align_bytes : 0) + ALIGNED_MEM_SIZE( sizeof(_SharedHeap) ); \
        CHECK_MPI( MPI_Alloc_mem(alloc_bytes, MPI_INFO_NULL, &sharedHeap) ); \
        if (sharedHeap == NULL) DIE("Thread %d: could not allocate %ld bytes for SharedHeap\n", MYTHREAD, blocks * align_bytes); \
        sharedHeap->size = align_bytes; \
        sharedHeap->idx = 0; \
        sharedHeap->rank = rank; \
        CHECK_MPI( MPI_Win_create( sharedHeap, MYTHREAD == rank ? alloc_bytes : 0, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &(sharedHeap->win) ) ); \
    } while (0)

#define FREE_SHARED_HEAP(sharedHeap) \
    { \
        if (sharedHeap->idx != 0) assert( MYTHREAD == sharedHeap->rank); /* only 1 rank has allocation */ \
        CHECK_MPI( MPI_Win_free(&(sharedHeap->win)) ); \
        CHECK_MPI( MPI_Free_mem( sharedHeap ) ); \
        sharedHeap = NULL; \
    } while(0)

#define ALLOC_FROM_SHARED_HEAP(sharedHeap, type, varname, count) \
    SharedPtr varname = __alloc_from_SharedHeap(sharedHeap, sizeof(type) * count)

#define SHARED_MEMPUT(dst, src, size) __shared_memput(dst, src, size)
#define SHARED_MEMGET(dst, src, size) __shared_memget(dst, src, size)

#else // NOT MPI
// OpenMP or fake it!

typedef struct {
    size_t size, idx;
} _SharedHeap;
typedef _SharedHeap *SharedHeap;
typedef char * SharedPtr;

static size_t __shared_atomic_fetchadd(size_t *ptr, size_t val) {
#ifdef GCC_EXTENSION
    return __sync_fetch_and_add(ptr, val);
#endif
    size_t t;
#ifdef OPENMP_3_1
#pragma omp atomic capture
#else
#pragma omp critical(_fetchadd)
#endif
    {
        t = *ptr;
        *ptr += val;
    }
    return t;
}
#define ATOMIC_FETCHADD __shared_atomic_fetchadd
#define ATOMIC_CSWAP TODO

static SharedPtr __alloc_from_SharedHeap(SharedHeap sharedHeap, size_t size) {
    assert(sharedHeap);
    size = ALIGNED_MEM_SIZE(size);
    size_t idx = ATOMIC_FETCHADD(&(sharedHeap->idx), size);
    if (idx + size > sharedHeap->size)
        DIE("Thread %d: attempt to allocate past size of SharedHeap (%lld of %lld)\n",
            MYTHREAD, (long long) idx, (long long) sharedHeap->size);
    SharedPtr data = (SharedPtr) sharedHeap;
    return data + idx + ALIGNED_MEM_SIZE( sizeof(_SharedHeap) );
}

/* only MYTHREAD == rank will allocate the blocks of memory */
#define INIT_SHARED_HEAP(sharedHeap, blocks, bytes, rank) \
    SharedHeap sharedHeap = NULL; \
    if (MYTHREAD == rank) { \
        size_t alignedBytes = ALIGNED_MEM_SIZE( bytes ) * blocks; \
        sharedHeap = (SharedHeap) malloc( alignedBytes + ALIGNED_MEM_SIZE( sizeof(_SharedHeap) ) ); \
        if (sharedHeap == NULL) DIE("Could not allocate %lld bytes for SharedHeap\n", (long long) alignedBytes); \
        sharedHeap->size = alignedBytes; \
        sharedHeap->idx = 0; \
    }

#define FREE_SHARED_HEAP(sharedHeap) \
    { \
        free(sharedHeap); \
        sharedHeap = NULL; \
    } while(0)

#define ALLOC_FROM_SHARED_HEAP(sharedHeap, type, varname, count) \
    SharedPtr varname = __alloc_from_SharedHeap(sharedHeap, sizeof(type) * count)

#define SHARED_MEMPUT(dst, src, size) memcpy(dst, src, size)
#define SHARED_MEMGET(dst, src, size) memcpy(dst, src, size)

#endif // NOT MPI
#endif // NOT UPC

#if defined (__cplusplus)
}
#endif

#endif // SHARED_HEAP_H_
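/*
 * Hedged usage sketch for the macros above, written for the OpenMP / serial
 * fallback branch only (hence the guard). It assumes CommonParallel.h provides
 * MYTHREAD and DIE(), as the header itself already relies on, and that the
 * calling thread is rank 0 so INIT_SHARED_HEAP actually allocates the heap.
 * The function name is made up for this example.
 */
#if !defined(__UPC__) && !defined(MPI_VERSION)
static void shared_heap_demo(void) {
    INIT_SHARED_HEAP(heap, /* blocks */ 4, /* bytes */ 1024, /* rank */ 0);

    /* carve 16 doubles out of the heap; the bump pointer is advanced atomically */
    ALLOC_FROM_SHARED_HEAP(heap, double, block, 16);

    double local[16] = { 0.0 };
    SHARED_MEMPUT(block, local, sizeof(local));   /* copy into the shared heap */
    SHARED_MEMGET(local, block, sizeof(local));   /* and read it back out */

    FREE_SHARED_HEAP(heap);
}
#endif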
openmp-unsupported.c
// RUN: %clang_cc1 -triple i386-apple-darwin10 -analyze -analyzer-checker=core.builtin -fopenmp -verify %s // expected-no-diagnostics void openmp_parallel_crash_test() { #pragma omp parallel ; }
GB_binop__div_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__div_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__div_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__div_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__div_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint32) // A*D function (colscale): GB (_AxD__div_uint32) // D*A function (rowscale): GB (_DxB__div_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__div_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__div_uint32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint32) // C=scalar+B GB (_bind1st__div_uint32) // C=scalar+B' GB (_bind1st_tran__div_uint32) // C=A+scalar GB (_bind2nd__div_uint32) // C=A'+scalar GB (_bind2nd_tran__div_uint32) // C type: uint32_t // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 0 // BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 32) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IDIV_UNSIGNED (x, y, 32) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_DIV || GxB_NO_UINT32 || GxB_NO_DIV_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__div_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__div_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__div_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__div_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__div_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__div_uint32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__div_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint32_t alpha_scalar ; uint32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint32_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__div_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__div_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__div_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__div_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__div_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (x, bij, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__div_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_UNSIGNED (aij, y, 32) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 32) ; \ } GrB_Info GB (_bind1st_tran__div_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict 
*Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 32) ; \ } GrB_Info GB (_bind2nd_tran__div_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
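/*
 * Standalone sketch of the "bind2nd" pattern used by GB (_bind2nd__div_uint32)
 * above (not a GraphBLAS API): every entry of A is combined with one scalar y,
 * in parallel, skipping entries absent from the bitmap. The real kernel calls
 * GB_IDIV_UNSIGNED; a plain guarded division stands in for it here, and the
 * zero-divisor result below is this sketch's choice, not GraphBLAS semantics.
 */
#include <stdint.h>

static void bind2nd_div_u32(uint32_t *Cx, const uint32_t *Ax, uint32_t y,
                            const int8_t *Ab, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;              /* entry not present */
        Cx [p] = (y == 0) ? UINT32_MAX : (Ax [p] / y) ;    /* stand-in for GB_IDIV_UNSIGNED */
    }
}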
GB_builder.c
//------------------------------------------------------------------------------ // GB_builder: build a matrix from tuples //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // CALLED BY: GB_build, GB_wait, GB_transpose, GB_concat_hyper // This function is called by GB_build to build a matrix T for GrB_Matrix_build // or GrB_Vector_build, by GB_wait to build a matrix T from the list of pending // tuples, and by GB_transpose to transpose a matrix or vector. Duplicates can // appear if called by GB_build or GB_wait, but not GB_transpose. // The indices are provided either as (I_input,J_input) or (I_work,J_work), not // both. The values are provided as S_input or S_work, not both. On return, // the *work arrays are either transplanted into T, or freed, since they are // temporary workspaces. // The work is done in major 5 Steps, some of which can be skipped, depending // on how the tuples are provided (*_work or *_input), and whether or not they // are sorted, or have duplicates. If vdim <= 1, some work is skipped (for // GrB_Vectors, and single-vector GrB_Matrices). Let e be the of tuples on // input. Let p be the # of threads used. // STEP 1: copy user input. O(e/p) read/write per thread, or skipped. // STEP 2: sort the tuples. Time: O((e log e)/p), read/write, or skipped if // the tuples are already sorted. // STEP 3: count vectors and duplicates. O(e/p) reads, per thread, if no // duplicates, or skipped if already done. O(e/p) read/writes // per thread if duplicates appear. // STEP 4: construct T->h and T->p. O(e/p) reads per thread, or skipped if // T is a vector. // STEP 5: assemble the tuples. O(e/p) read/writes per thread, or O(1) if the // values can be transplanted into T as-is. // For GrB_Matrix_build: If the input (I_input, J_input, S_input) is already // sorted with no duplicates, and no typecasting needs to be done, then Step 1 // still must be done (each thread does O(e/p) reads of (I_input,J_input) and // writes to I_work), but Step 1 also does the work for Step 3. Step 2 and 3 // are skipped. Step 4 does O(e/p) reads per thread (J_input only). Then // I_work is transplanted into T->i. Step 5 does O(e/p) read/writes per thread // to copy Sx into T->x. // For GrB_Vector_build: as GrB_Matrix_build, Step 1 does O(e/p) read/writes // per thread. The input is always a vector, so vdim == 1 always holds. Step // 2 is skipped if the indices are already sorted, and Step 3 does no work at // all unless duplicates appear. Step 4 takes no time, for any vector. Step 5 // does O(e/p) reads/writes per thread. // For GB_wait: the pending tuples are provided as I_work, J_work, and S_work, // so Step 1 is skipped (no need to check for invalid indices). The input // J_work may be null (vdim can be anything, since GB_wait is used for both // vectors and matrices). The tuples might be in sorted order already, which // is known precisely known from A->Pending->sorted. Step 2 does // O((e log e)/p) work to sort the tuples. Duplicates may appear, and // out-of-order tuples are likely. Step 3 does O(e/p) read/writes. Step 4 // does O(e/p) reads per thread of (I_work,J_work), or just I_work. Step 5 // does O(e/p) read/writes per thread, or O(1) time if S_work can be // transplanted into T->x. 
// For GB_transpose: uses I_work, J_work, and either S_input (if no op applied // to the values) or S_work (if an op was applied to the A->x values). This is // only done for matrices, not vectors, so vdim > 1 will always hold. The // indices are valid so Step 1 is skipped. The tuples are not sorted, so Step // 2 takes O((e log e)/p) time to do the sort. There are no duplicates, so // Step 3 only does O(e/p) reads of J_work to count the vectors in each slice. // Step 4 only does O(e/p) reads of J_work to compute T->h and T->p. Step 5 // does O(e/p) read/writes per thread, but it uses the simpler case in // GB_reduce_build_template since no duplicates can appear. It is unlikely // able to transplant S_work into T->x since the input will almost always be // unsorted. // For GB_concat_hyper: uses I_work, J_work, and S_work. No duplicates // appear. Tuples are not sorted on input. I_work is transplanted into C->i. // J_work and S_work are freed on output. S_work is not transplanted into // C->x. // For iso inputs/outputs: T and Sx have the same iso property. If // they are iso, then dup is always NULL. Duplicates may or may not appear // if T and Sx are iso. // (1) GrB_Matrix_build, GrB_Vector_build, and GB_wait do not pass in an iso // Sx array, where Sx is S_input for GrB*build, and S_work for GB_wait. // Sx and Tx are not iso. Duplicates may appear. dup is always present // for GrB*build, but may be either NULL or non-NULL for GB_wait. // (2) GxB_Matrix_build_Scalar and GxB_Vector_build_Scalar: always construct // iso matrices. For those methods Sx and Tx are always iso, and no dup // operator is be passed in (dup is NULL here, which is the implied 2nd // operator). Duplicates may appear. // (3) GB_transpose and GB_concat_hyper can pass in Sx as iso or // non-iso, and always passes in dup as NULL since there are no // duplicates. Sx and Tx are either both iso, or both non-iso. // This method always returns T as hypersparse, and T is iso if and only // if Sx is iso. #include "GB_build.h" #include "GB_sort.h" #include "GB_binop.h" #ifndef GBCOMPACT #include "GB_red__include.h" #endif #define GB_I_WORK(t) (((t) < 0) ? -1 : I_work [t]) #define GB_J_WORK(t) (((t) < 0) ? -1 : ((J_work == NULL) ? 0 : J_work [t])) #define GB_K_WORK(t) (((t) < 0) ? -1 : ((K_work == NULL) ? 
t : K_work [t])) #define GB_FREE_WORK \ { \ GB_WERK_POP (Work, int64_t) ; \ GB_FREE (I_work_handle, *I_work_size_handle) ; \ GB_FREE (J_work_handle, *J_work_size_handle) ; \ GB_FREE (S_work_handle, *S_work_size_handle) ; \ GB_FREE_WERK (&K_work, K_work_size) ; \ } //------------------------------------------------------------------------------ // GB_builder //------------------------------------------------------------------------------ GrB_Info GB_builder // build a matrix from tuples ( GrB_Matrix T, // matrix to build, static or dynamic header const GrB_Type ttype, // type of output matrix T const int64_t vlen, // length of each vector of T const int64_t vdim, // number of vectors in T const bool is_csc, // true if T is CSC, false if CSR int64_t **I_work_handle, // for (i,k) or (j,i,k) tuples size_t *I_work_size_handle, int64_t **J_work_handle, // for (j,i,k) tuples size_t *J_work_size_handle, GB_void **S_work_handle, // array of values of tuples, size ijslen, // or size 1 if S is iso size_t *S_work_size_handle, bool known_sorted, // true if tuples known to be sorted bool known_no_duplicates, // true if tuples known to not have dupl int64_t ijslen, // size of I_work and J_work arrays const bool is_matrix, // true if T a GrB_Matrix, false if vector const int64_t *restrict I_input,// original indices, size nvals const int64_t *restrict J_input,// original indices, size nvals const GB_void *restrict S_input,// array of values of tuples, size nvals, // or size 1 if S_input or S_work are iso const bool S_iso, // true if S_input or S_work are iso const int64_t nvals, // number of tuples, and size of K_work const GrB_BinaryOp dup, // binary function to assemble duplicates, // if NULL use the SECOND operator to // keep the most recent duplicate. const GrB_Type stype, // the type of S_work or S_input GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (T != NULL) ; // T is a static or dynamic header on input ASSERT (nvals >= 0) ; ASSERT_TYPE_OK (ttype, "ttype for builder", GB0) ; ASSERT_BINARYOP_OK_OR_NULL (dup, "dup for builder", GB0) ; ASSERT (I_work_handle != NULL) ; ASSERT (J_work_handle != NULL) ; ASSERT (S_work_handle != NULL) ; ASSERT (!GB_OP_IS_POSITIONAL (dup)) ; ASSERT (I_work_size_handle != NULL) ; ASSERT (J_work_size_handle != NULL) ; ASSERT (S_work_size_handle != NULL) ; //-------------------------------------------------------------------------- // get Sx //-------------------------------------------------------------------------- GB_void *restrict S_work = (*S_work_handle) ; const GB_void *restrict Sx = (S_work == NULL) ? S_input : S_work ; ASSERT (GB_IMPLIES (nvals > 0, Sx != NULL)) ; ASSERT (GB_IMPLIES (S_iso, ttype == stype)) ; ASSERT (GB_IMPLIES (S_iso, dup == NULL)) ; //========================================================================== // symbolic phase of the build ============================================= //========================================================================== // The symbolic phase sorts the tuples and finds any duplicates. The // output matrix T is constructed (not including T->i and T->x), and T->h // and T->p are computed. Then I_work is transplanted into T->i, or T->i is // allocated. T->x is then allocated. It is not computed until the // numeric phase. // When this function returns, I_work is either freed or transplanted into // T->i. 
J_work is freed, and the I_work and J_work pointers (in the // caller) are set to NULL by setting their handles to NULL. Note that // J_work may already be NULL on input, if T has one or zero vectors // (J_work_handle is always non-NULL however). GrB_Info info ; int64_t *restrict I_work = (*I_work_handle) ; int64_t *restrict J_work = (*J_work_handle) ; int64_t *restrict K_work = NULL ; size_t K_work_size = 0 ; ASSERT (*J_work_size_handle == GB_Global_memtable_size (J_work)) ; //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (nvals, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- GB_WERK_DECLARE (Work, int64_t) ; GB_WERK_PUSH (Work, 5*(nthreads+1), int64_t) ; if (Work == NULL) { // out of memory GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } memset (Work, 0, Work_nitems * sizeof (int64_t)) ; int64_t *restrict tstart_slice = Work ; // nthreads+1 int64_t *restrict tnvec_slice = Work + (nthreads+1) ; // nthreads+1 int64_t *restrict tnz_slice = Work + 2*(nthreads+1) ; // nthreads+1 int64_t *restrict kbad = Work + 3*(nthreads+1) ; // nthreads int64_t *restrict ilast_slice = Work + 4*(nthreads+1) ; // nthreads //-------------------------------------------------------------------------- // partition the tuples for the threads //-------------------------------------------------------------------------- // Thread tid handles tuples tstart_slice [tid] to tstart_slice [tid+1]-1. // Each thread handles about the same number of tuples. This partition // depends only on nvals. GB_eslice (tstart_slice, nvals, nthreads) ; // tstart_slice [tid]: first tuple in slice tid // tnvec_slice [tid]: # of vectors that start in a slice. If a vector // starts in one slice and ends in another, it is // counted as being in the first slice. // tnz_slice [tid]: # of entries in a slice after removing duplicates // sentinel values for the final cumulative sum tnvec_slice [nthreads] = 0 ; tnz_slice [nthreads] = 0 ; // this becomes true if the first pass computes tnvec_slice and tnz_slice, // and if the (I_input,J_input) tuples were found to be already sorted with // no duplicates present. bool tnvec_and_tnz_slice_computed = false ; //-------------------------------------------------------------------------- // STEP 1: copy user input and check if valid //-------------------------------------------------------------------------- // If the indices are provided by (I_input,J_input), then import them into // (I_work,J_work) and check if they are valid, and sorted. If the input // happens to be already sorted, then duplicates are detected and the # of // vectors in each slice is counted. if (I_work == NULL) { //---------------------------------------------------------------------- // allocate I_work //---------------------------------------------------------------------- // allocate workspace to load and sort the index tuples: // vdim <= 1: I_work and K_work for (i,k) tuples, where i = I_input [k] // vdim > 1: also J_work for (j,i,k) tuples where i = I_input [k] and // j = J_input [k]. If the tuples are found to be already sorted on // input, then J_work is not allocated, and J_input is used instead. 
// The k value in the tuple gives the position in the original set of // tuples: I_input [k] and Sx [k] when vdim <= 1, and also J_input [k] // for matrices with vdim > 1. // The workspace I_work and J_work are allocated here but freed (or // transplanted) inside GB_builder. K_work is allocated, used, and // freed in GB_builder. ASSERT (J_work == NULL) ; I_work = GB_MALLOC (nvals, int64_t, I_work_size_handle) ; (*I_work_handle) = I_work ; ijslen = nvals ; if (I_work == NULL) { // out of memory GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } //---------------------------------------------------------------------- // create the tuples to sort, and check for any invalid indices //---------------------------------------------------------------------- known_sorted = true ; bool no_duplicates_found = true ; if (nvals == 0) { // nothing to do } else if (is_matrix) { //------------------------------------------------------------------ // C is a matrix; check both I_input and J_input //------------------------------------------------------------------ ASSERT (J_input != NULL) ; ASSERT (I_work != NULL) ; ASSERT (vdim >= 0) ; ASSERT (I_input != NULL) ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(&&:known_sorted) reduction(&&:no_duplicates_found) for (tid = 0 ; tid < nthreads ; tid++) { kbad [tid] = -1 ; int64_t my_tnvec = 0 ; int64_t kstart = tstart_slice [tid] ; int64_t kend = tstart_slice [tid+1] ; int64_t ilast = (kstart == 0) ? -1 : I_input [kstart-1] ; int64_t jlast = (kstart == 0) ? -1 : J_input [kstart-1] ; for (int64_t k = kstart ; k < kend ; k++) { // get k-th index from user input: (i,j) int64_t i = I_input [k] ; int64_t j = J_input [k] ; if (i < 0 || i >= vlen || j < 0 || j >= vdim) { // halt if out of bounds kbad [tid] = k ; break ; } // check if the tuples are already sorted known_sorted = known_sorted && ((jlast < j) || (jlast == j && ilast <= i)) ; // check if this entry is a duplicate of the one before it no_duplicates_found = no_duplicates_found && (!(jlast == j && ilast == i)) ; // copy the tuple into I_work. J_work is done later. I_work [k] = i ; if (j > jlast) { // vector j starts in this slice (but this is // valid only if J_input is sorted on input) my_tnvec++ ; } // log the last index seen ilast = i ; jlast = j ; } // these are valid only if I_input and J_input are sorted on // input, with no duplicates present. tnvec_slice [tid] = my_tnvec ; tnz_slice [tid] = kend - kstart ; } // collect the report from each thread for (int tid = 0 ; tid < nthreads ; tid++) { if (kbad [tid] >= 0) { // invalid index int64_t i = I_input [kbad [tid]] ; int64_t j = J_input [kbad [tid]] ; int64_t row = is_csc ? i : j ; int64_t col = is_csc ? j : i ; int64_t nrows = is_csc ? vlen : vdim ; int64_t ncols = is_csc ? vdim : vlen ; GB_FREE_WORK ; GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS, "index (" GBd "," GBd ") out of bounds," " must be < (" GBd ", " GBd ")", row, col, nrows, ncols) ; } } // if the tuples were found to be already in sorted order, and if // no duplicates were found, then tnvec_slice and tnz_slice are now // valid, Otherwise, they can only be computed after sorting. 
tnvec_and_tnz_slice_computed = known_sorted && no_duplicates_found ; //------------------------------------------------------------------ // allocate J_work, if needed //------------------------------------------------------------------ if (vdim > 1 && !known_sorted) { // copy J_input into J_work, so the tuples can be sorted J_work = GB_MALLOC (nvals, int64_t, J_work_size_handle) ; (*J_work_handle) = J_work ; if (J_work == NULL) { // out of memory GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } GB_memcpy (J_work, J_input, nvals * sizeof (int64_t), nthreads); } else { // J_work is a shallow copy of J_input. The pointer is not // copied into (*J_work_handle), so it will not be freed. // J_input is not modified, even though it is typecast to the // int64_t *J_work, since J_work is not modified in this case. J_work = (int64_t *) J_input ; } } else { //------------------------------------------------------------------ // C is a typecasted GrB_Vector; check only I_input //------------------------------------------------------------------ ASSERT (I_input != NULL) ; ASSERT (J_input == NULL) ; ASSERT (vdim == 1) ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(&&:known_sorted) reduction(&&:no_duplicates_found) for (tid = 0 ; tid < nthreads ; tid++) { kbad [tid] = -1 ; int64_t kstart = tstart_slice [tid] ; int64_t kend = tstart_slice [tid+1] ; int64_t ilast = (kstart == 0) ? -1 : I_input [kstart-1] ; for (int64_t k = kstart ; k < kend ; k++) { // get k-th index from user input: (i) int64_t i = I_input [k] ; if (i < 0 || i >= vlen) { // halt if out of bounds kbad [tid] = k ; break ; } // check if the tuples are already sorted known_sorted = known_sorted && (ilast <= i) ; // check if this entry is a duplicate of the one before it no_duplicates_found = no_duplicates_found && (!(ilast == i)) ; // copy the tuple into the work arrays to be sorted I_work [k] = i ; // log the last index seen ilast = i ; } } // collect the report from each thread for (int tid = 0 ; tid < nthreads ; tid++) { if (kbad [tid] >= 0) { // invalid index int64_t i = I_input [kbad [tid]] ; GB_FREE_WORK ; GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS, "index (" GBd ") out of bounds, must be < (" GBd ")", i, vlen) ; } } } //---------------------------------------------------------------------- // determine if duplicates are possible //---------------------------------------------------------------------- // The input is now known to be sorted, or not. If it is sorted, and // if no duplicates were found, then it is known to have no duplicates. // Otherwise, duplicates might appear, but a sort is required first to // check for duplicates. known_no_duplicates = known_sorted && no_duplicates_found ; } //-------------------------------------------------------------------------- // STEP 2: sort the tuples in ascending order //-------------------------------------------------------------------------- // If the tuples are known to already be sorted, Step 2 is skipped. In // that case, K_work is NULL (not allocated), which implicitly means that // K_work [k] = k for all k = 0:nvals-1. K_work is always NULL if Sx and // Tx are iso. 
if (!known_sorted) { //---------------------------------------------------------------------- // allocate K_work workspace (not needed if T and Sx are iso) //---------------------------------------------------------------------- if (!S_iso) { // create the k part of each tuple K_work = GB_MALLOC_WERK (nvals, int64_t, &K_work_size) ; if (K_work == NULL) { // out of memory GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } // The k part of each tuple (i,k) or (j,i,k) records the original // position of the tuple in the input list. This allows an // unstable sorting algorithm to be used. Since k is unique, it // forces the result of the sort to be stable regardless of whether // or not the sorting algorithm is stable. It also keeps track of // where the numerical value of the tuple can be found; it is in // Sx[k] for the tuple (i,k) or (j,i,k), regardless of where the // tuple appears in the list after it is sorted. int64_t k ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (k = 0 ; k < nvals ; k++) { K_work [k] = k ; } } //---------------------------------------------------------------------- // sort all the tuples //---------------------------------------------------------------------- if (vdim > 1) { //------------------------------------------------------------------ // sort a set of (j,i,k) tuples //------------------------------------------------------------------ if (S_iso) { // K_work is NULL; only sort (j,i) info = GB_msort_2 (J_work, I_work, nvals, nthreads) ; } else { info = GB_msort_3 (J_work, I_work, K_work, nvals, nthreads) ; } #ifdef GB_DEBUG if (info == GrB_SUCCESS) { int64_t ilast = -1 ; int64_t jlast = -1 ; for (int64_t k = 0 ; k < nvals ; k++) { int64_t i = I_work [k] ; int64_t j = J_work [k] ; ASSERT ((jlast < j) || (jlast == j && ilast <= i)) ; ilast = i ; jlast = j ; } } #endif } else { //------------------------------------------------------------------ // sort a set of (i,k) tuples //------------------------------------------------------------------ if (S_iso) { // K_work is NULL; only sort (i) info = GB_msort_1 (I_work, nvals, nthreads) ; } else { info = GB_msort_2 (I_work, K_work, nvals, nthreads) ; } #ifdef GB_DEBUG if (info == GrB_SUCCESS) { int64_t ilast = -1 ; for (int64_t k = 0 ; k < nvals ; k++) { int64_t i = I_work [k] ; ASSERT (ilast <= i) ; ilast = i ; } } #endif } if (info != GrB_SUCCESS) { // out of memory in GB_msort_* GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } } //-------------------------------------------------------------------------- // STEP 3: count vectors and duplicates in each slice //-------------------------------------------------------------------------- // Duplicates are located, counted and their indices negated. The # of // vectors in each slice is counted. If the indices are known to not have // duplicates, then only the vectors are counted. Counting the # of // vectors is skipped if already done by Step 1. if (known_no_duplicates) { //---------------------------------------------------------------------- // no duplicates: just count # vectors in each slice //---------------------------------------------------------------------- // This is much faster, particularly if the # of vectors in each slice // has already been computed. 
#ifdef GB_DEBUG { // assert that there are no duplicates int64_t ilast = -1, jlast = -1 ; for (int64_t t = 0 ; t < nvals ; t++) { int64_t i = GB_I_WORK (t), j = GB_J_WORK (t) ; bool is_duplicate = (i == ilast && j == jlast) ; ASSERT (!is_duplicate) ; ilast = i ; jlast = j ; } } #endif if (vdim <= 1) { // all tuples appear in at most one vector, and there are no // duplicates, so there is no need to scan I_work or J_work. for (int tid = 0 ; tid < nthreads ; tid++) { int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; tnvec_slice [tid] = 0 ; tnz_slice [tid] = tend - tstart ; } tnvec_slice [0] = (nvals == 0) ? 0 : 1 ; } else { // count the # of unique vector indices in J_work. No need to scan // I_work since there are no duplicates to be found. Also no need // to compute them if already found in Step 1. if (!tnvec_and_tnz_slice_computed) { int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (tid = 0 ; tid < nthreads ; tid++) { int64_t my_tnvec = 0 ; int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; int64_t jlast = GB_J_WORK (tstart-1) ; for (int64_t t = tstart ; t < tend ; t++) { // get the t-th tuple int64_t j = J_work [t] ; if (j > jlast) { // vector j starts in this slice my_tnvec++ ; jlast = j ; } } tnvec_slice [tid] = my_tnvec ; tnz_slice [tid] = tend - tstart ; } } } } else { //---------------------------------------------------------------------- // look for duplicates and count # vectors in each slice //---------------------------------------------------------------------- for (int tid = 0 ; tid < nthreads ; tid++) { int64_t tstart = tstart_slice [tid] ; ilast_slice [tid] = GB_I_WORK (tstart-1) ; } int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (tid = 0 ; tid < nthreads ; tid++) { int64_t my_tnvec = 0 ; int64_t my_ndupl = 0 ; int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; int64_t ilast = ilast_slice [tid] ; int64_t jlast = GB_J_WORK (tstart-1) ; for (int64_t t = tstart ; t < tend ; t++) { // get the t-th tuple int64_t i = I_work [t] ; int64_t j = GB_J_WORK (t) ; // tuples are now sorted but there may be duplicates ASSERT ((jlast < j) || (jlast == j && ilast <= i)) ; // check if (j,i,k) is a duplicate if (i == ilast && j == jlast) { // flag the tuple as a duplicate I_work [t] = -1 ; my_ndupl++ ; // the sort places earlier duplicate tuples (with smaller // k) after later ones (with larger k). ASSERT (GB_K_WORK (t-1) < GB_K_WORK (t)) ; } else { // this is a new tuple if (j > jlast) { // vector j starts in this slice my_tnvec++ ; jlast = j ; } ilast = i ; } } tnvec_slice [tid] = my_tnvec ; tnz_slice [tid] = (tend - tstart) - my_ndupl ; } } //-------------------------------------------------------------------------- // find total # of vectors and duplicates in all tuples //-------------------------------------------------------------------------- // Replace tnvec_slice with its cumulative sum, after which each slice tid // will be responsible for the # vectors in T that range from tnvec_slice // [tid] to tnvec_slice [tid+1]-1. 
GB_cumsum (tnvec_slice, nthreads, NULL, 1, NULL) ; int64_t tnvec = tnvec_slice [nthreads] ; // Replace tnz_slice with its cumulative sum GB_cumsum (tnz_slice, nthreads, NULL, 1, NULL) ; // find the total # of final entries, after assembling duplicates int64_t tnz = tnz_slice [nthreads] ; int64_t ndupl = nvals - tnz ; //-------------------------------------------------------------------------- // allocate T; always hypersparse //-------------------------------------------------------------------------- // allocate T; allocate T->p and T->h but do not initialize them. // T is always hypersparse. The header T always exists on input, as // either a static or dynamic header. bool static_header = T->static_header ; info = GB_new (&T, static_header, // always hyper, static or dynamic header ttype, vlen, vdim, GB_Ap_malloc, is_csc, GxB_HYPERSPARSE, GB_ALWAYS_HYPER, tnvec, Context) ; if (info != GrB_SUCCESS) { // out of memory GB_FREE_WORK ; return (info) ; } ASSERT (T->p != NULL) ; ASSERT (T->h != NULL) ; ASSERT (T->b == NULL) ; ASSERT (T->i == NULL) ; ASSERT (T->x == NULL) ; T->iso = S_iso ; // OK: T is iso if and only if Sx is iso bool do_burble = (vlen > 1 || vdim > 1) && (nvals > 1) ; if (do_burble) { if (S_iso) { GBURBLE ("(iso build) ") ; } else { GBURBLE ("(build) ") ; } } //-------------------------------------------------------------------------- // STEP 4: construct the vector pointers and hyperlist for T //-------------------------------------------------------------------------- // Step 4 scans the J_work indices and constructs T->h and T->p. int64_t *restrict Th = T->h ; int64_t *restrict Tp = T->p ; if (vdim <= 1) { //---------------------------------------------------------------------- // special case for vectors //---------------------------------------------------------------------- ASSERT (tnvec == 0 || tnvec == 1) ; if (tnvec > 0) { Th [0] = 0 ; Tp [0] = 0 ; } } else if (ndupl == 0) { //---------------------------------------------------------------------- // no duplicates appear //---------------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (tid = 0 ; tid < nthreads ; tid++) { int64_t my_tnvec = tnvec_slice [tid] ; int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; int64_t jlast = GB_J_WORK (tstart-1) ; for (int64_t t = tstart ; t < tend ; t++) { // get the t-th tuple int64_t j = GB_J_WORK (t) ; if (j > jlast) { // vector j starts in this slice Th [my_tnvec] = j ; Tp [my_tnvec] = t ; my_tnvec++ ; jlast = j ; } } } } else { //---------------------------------------------------------------------- // it is known that at least one duplicate appears //---------------------------------------------------------------------- int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (tid = 0 ; tid < nthreads ; tid++) { int64_t my_tnz = tnz_slice [tid] ; int64_t my_tnvec = tnvec_slice [tid] ; int64_t tstart = tstart_slice [tid] ; int64_t tend = tstart_slice [tid+1] ; int64_t jlast = GB_J_WORK (tstart-1) ; for (int64_t t = tstart ; t < tend ; t++) { // get the t-th tuple int64_t i = I_work [t] ; int64_t j = GB_J_WORK (t) ; if (i >= 0) { // this is a new tuple if (j > jlast) { // vector j starts in this slice Th [my_tnvec] = j ; Tp [my_tnvec] = my_tnz ; my_tnvec++ ; jlast = j ; } my_tnz++ ; } } } } // log the end of the last vector T->nvec_nonempty = tnvec ; T->nvec = tnvec ; Tp [tnvec] = tnz ; ASSERT (T->nvec == T->plen) ; T->magic = GB_MAGIC ; 
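    // Worked example (illustrative only): suppose the sorted tuples are
    //      (j,i) = (0,3), (0,7), (2,1), (2,1), (2,4)
    // with the second (2,1) flagged as a duplicate in Step 3, so nvals = 5,
    // ndupl = 1, tnz = 4, and tnvec = 2.  The construction above then gives
    //      Th = [0, 2] and Tp = [0, 2, 4],
    // i.e. vector 0 owns entries 0..1 of T->i and T->x, vector 2 owns entries
    // 2..3, and the assembled duplicate occupies a single slot.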
//-------------------------------------------------------------------------- // free J_work if it exists //-------------------------------------------------------------------------- ASSERT (J_work_handle != NULL) ; GB_FREE (J_work_handle, *J_work_size_handle) ; J_work = NULL ; //-------------------------------------------------------------------------- // allocate T->i //-------------------------------------------------------------------------- if (ndupl == 0) { // shrink I_work from size ijslen to size tnz if (tnz < ijslen) { // this cannot fail since the size is shrinking. bool ok ; GB_REALLOC (I_work, tnz, int64_t, I_work_size_handle, &ok, Context); ASSERT (ok) ; } // transplant I_work into T->i T->i = I_work ; T->i_size = (*I_work_size_handle) ; I_work = NULL ; (*I_work_handle) = NULL ; (*I_work_size_handle) = 0 ; } else { // duplicates exist, so allocate a new T->i. I_work must be freed later T->i = GB_MALLOC (tnz, int64_t, &(T->i_size)) ; if (T->i == NULL) { // out of memory GB_phbix_free (T) ; GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } } int64_t *restrict Ti = T->i ; //========================================================================== // numerical phase of the build: assemble any duplicates //========================================================================== // The tuples have been sorted. Assemble any duplicates with a switch // factory of built-in workers, or four generic workers. The vector // pointers T->p and hyperlist T->h (if hypersparse) have already been // computed. // If there are no duplicates, T->i holds the row indices of the tuple. // Otherwise, the row indices are still in I_work. K_work holds the // positions of each tuple in the array Sx. The tuples are sorted so that // duplicates are adjacent to each other and they appear in the order they // appeared in the original tuples. This method assembles the duplicates // and computes T->i and T->x from I_work, K_work, and Sx. into T, becoming // T->i. If no duplicates appear, T->i is already computed, and Sx just // needs to be copied and permuted into T->x. // The (i,k,Sx[k]) tuples are held in two integer arrays: (1) I_work or // T->i, and (2) K_work, and an array Sx of numerical values. Sx has not // been sorted, nor even accessed yet. It is identical to the original // unsorted tuples. The (i,k,Sx[k]) tuple holds the row index i, the // position k, and the value Sx [k]. This entry becomes T(i,j) = Sx [k] in // the matrix T, and duplicates (if any) are assembled via the dup // operator. //-------------------------------------------------------------------------- // get opcodes and check types //-------------------------------------------------------------------------- // With GB_build, there can be 1 to 2 different types. // T->type is identical to the types of x,y,z for z=dup(x,y). // dup is never NULL and all its three types are the same // The type of Sx (stype) can different but must be compatible // with T->type // With GB_wait, there can be 1 to 5 different types: // The pending tuples are in Sx, of type stype which must be // compatible with dup->ytype and T->type // z = dup (x,y): can be NULL or have 1 to 3 different types // T->type: must be compatible with all above types. 
// dup may be NULL, in which case it is assumed be the implicit SECOND // operator, with all three types equal to T->type GrB_Type xtype, ytype, ztype ; GxB_binary_function fdup ; #ifndef GBCOMPACT GB_Opcode opcode ; #endif GB_Type_code tcode = ttype->code ; const size_t tsize = ttype->size ; bool op_2nd ; ASSERT_TYPE_OK (ttype, "ttype for build_factory", GB0) ; if (dup == NULL) { //---------------------------------------------------------------------- // dup is the implicit SECOND operator //---------------------------------------------------------------------- // z = SECOND (x,y) where all three types are the same as ttype // T(i,j) = (ttype) Sx(k) will be done for all tuples. #ifndef GBCOMPACT opcode = GB_SECOND_opcode ; #endif xtype = ttype ; ytype = ttype ; ztype = ttype ; fdup = NULL ; op_2nd = true ; ASSERT (GB_op_is_second (dup, ttype)) ; } else { //---------------------------------------------------------------------- // dup is an explicit operator //---------------------------------------------------------------------- // T(i,j) = (ttype) Sx[k] will be done for the first tuple. // for subsequent tuples: T(i,j) += Sx[k], via the dup operator and // typecasting: // // y = (dup->ytype) Sx[k] // x = (dup->xtype) T(i,j) // z = (dup->ztype) dup (x,y) // T(i,j) = (ttype) z ASSERT_BINARYOP_OK (dup, "dup for build_factory", GB0) ; ASSERT (!S_iso) ; #ifndef GBCOMPACT opcode = dup->opcode ; #endif xtype = dup->xtype ; ytype = dup->ytype ; ztype = dup->ztype ; fdup = dup->function ; op_2nd = GB_op_is_second (dup, ttype) ; } //-------------------------------------------------------------------------- // get the sizes and codes of each type //-------------------------------------------------------------------------- GB_Type_code zcode = ztype->code ; GB_Type_code xcode = xtype->code ; GB_Type_code ycode = ytype->code ; ASSERT (GB_Type_compatible (ttype, stype)) ; // T(i,j) = (ttype) Sx ASSERT (GB_Type_compatible (ytype, stype)) ; // y = (ytype) Sx ASSERT (GB_Type_compatible (xtype, ttype)) ; // x = (xtype) T(i,j) ASSERT (GB_Type_compatible (ttype, ztype)) ; // T(i,j) = (ttype) z size_t zsize = ztype->size ; size_t xsize = xtype->size ; size_t ysize = ytype->size ; // no typecasting if all 5 types are the same bool nocasting = (ttype == stype) && (ttype == xtype) && (ttype == ytype) && (ttype == ztype) ; ASSERT_TYPE_OK (ttype, "ttype for build_factory", GB0) ; ASSERT_TYPE_OK (stype, "stype for build_factory", GB0) ; ASSERT_TYPE_OK (xtype, "xtype for build_factory", GB0) ; ASSERT_TYPE_OK (ytype, "ytype for build_factory", GB0) ; ASSERT_TYPE_OK (ztype, "ztype for build_factory", GB0) ; //-------------------------------------------------------------------------- // STEP 5: assemble the tuples //-------------------------------------------------------------------------- bool copy_S_into_T = (nocasting && known_sorted && ndupl == 0) ; if (copy_S_into_T && S_work != NULL) { //---------------------------------------------------------------------- // transplant S_work into T->x //---------------------------------------------------------------------- // No typecasting is needed, the tuples were originally in sorted // order, and no duplicates appear. All that is required is to copy Sx // into Tx. Sx can be directly transplanted into T->x since Sx is // provided as S_work. GB_builder must either transplant or free // S_work. The transplant can be used by GB_wait, whenever the tuples // are already sorted, with no duplicates, and no typecasting is // needed, since S_work is always A->Pending->x. 
T and Sx may be iso // or non-iso. T->x = S_work ; T->x_size = (*S_work_size_handle) ; S_work = NULL ; (*S_work_handle) = NULL ; (*S_work_size_handle) = 0 ; int64_t tx_size_required = tnz * tsize ; if (2 * tx_size_required < T->x_size) { // shrink the size of T->x bool ok = true ; GB_REALLOC (T->x, tx_size_required, GB_void, &(T->x_size), &ok, Context) ; } } else { //---------------------------------------------------------------------- // allocate T->x //---------------------------------------------------------------------- T->x = GB_XALLOC (S_iso, tnz, tsize, &(T->x_size)) ; if (T->x == NULL) { // out of memory GB_phbix_free (T) ; GB_FREE_WORK ; return (GrB_OUT_OF_MEMORY) ; } GB_void *restrict Tx = (GB_void *) T->x ; ASSERT (GB_IMPLIES (nvals > 0, Sx != NULL)) ; if (nvals == 0) { // nothing to do } else if (copy_S_into_T) { //------------------------------------------------------------------ // copy Sx into T->x //------------------------------------------------------------------ // No typecasting is needed, the tuples were originally in sorted // order, and no duplicates appear. All that is required is to // copy Sx into Tx. Sx cannot be transplanted into T->x since // S_work is NULL and S_input cannot be modified by GB_builder. ASSERT (S_work == NULL) ; ASSERT (Sx == S_input) ; GB_memcpy (Tx, Sx, (S_iso ? 1 : nvals) * tsize, nthreads) ; } else if (nocasting) { //------------------------------------------------------------------ // assemble the values, Sx, into T, no typecasting needed //------------------------------------------------------------------ // Sx (either S_work or S_input) must be permuted and copied into // T->x, since the tuples had to be sorted, or duplicates appear. // Any duplicates are now assembled. // There are 44 common cases of this function for built-in types // and 8 associative operators: MIN, MAX, PLUS, TIMES for 10 types // (all but boolean; and OR, AND, XOR, and EQ for boolean. // In addition, the FIRST and SECOND operators are hard-coded, for // another 22 workers, since SECOND is used by GB_wait and since // FIRST is useful for keeping the first tuple seen. It is // controlled by the GB_INCLUDE_SECOND_OPERATOR definition, so they // do not appear in GB_reduce_to_* where the FIRST and SECOND // operators are not needed. // Early exit cannot be exploited, so the terminal is ignored. 
bool done = false ; if (S_iso) { //-------------------------------------------------------------- // T and Sx are iso; set iso value and delete duplicates //-------------------------------------------------------------- memcpy (Tx, Sx, tsize) ; #define GB_ISO_BUILD #include "GB_reduce_build_template.c" done = true ; } else { //-------------------------------------------------------------- // T and Sx are not iso; call in the workers //-------------------------------------------------------------- #ifndef GBCOMPACT //---------------------------------------------------------- // define the worker for the switch factory //---------------------------------------------------------- #define GB_INCLUDE_SECOND_OPERATOR #define GB_red(opname,aname) \ GB (_red_build_ ## opname ## aname) #define GB_RED_WORKER(opname,aname,atype) \ { \ info = GB_red (opname, aname) ((atype *) Tx, Ti, \ (atype *) Sx, nvals, ndupl, I_work, K_work, \ tstart_slice, tnz_slice, nthreads) ; \ done = (info != GrB_NO_VALUE) ; \ } \ break ; //---------------------------------------------------------- // launch the switch factory //---------------------------------------------------------- // controlled by opcode and typecode GB_Type_code typecode = tcode ; #include "GB_red_factory.c" #endif } //------------------------------------------------------------------ // generic worker //------------------------------------------------------------------ if (!done) { if (do_burble) GBURBLE ("(generic build) ") ; //-------------------------------------------------------------- // no typecasting, but use the fdup function pointer and memcpy //-------------------------------------------------------------- // Either the fdup operator or type of Sx and T are // user-defined, or fdup is not an associative operator handled // by the GB_red_factory, or some combination of these // conditions. User-defined types cannot be typecasted, so // this handles all user-defined types. // Tx [p] = (ttype) Sx [k], but with no typecasting #undef GB_CAST_ARRAY_TO_ARRAY #define GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \ memcpy (Tx +((p)*tsize), Sx +((k)*tsize), tsize) ; if (op_2nd) { //---------------------------------------------------------- // dup is the SECOND operator, with no typecasting //---------------------------------------------------------- // Tx [p] += (ttype) Sx [k], but 2nd op and no typecasting #undef GB_ADD_CAST_ARRAY_TO_ARRAY #define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \ GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) #include "GB_reduce_build_template.c" } else { //---------------------------------------------------------- // dup is another operator, with no typecasting needed //---------------------------------------------------------- // Tx [p] += (ttype) Sx [k], but with no typecasting #undef GB_ADD_CAST_ARRAY_TO_ARRAY #define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \ fdup (Tx +((p)*tsize), Tx +((p)*tsize), Sx+((k)*tsize)); #include "GB_reduce_build_template.c" } } } else { //------------------------------------------------------------------ // assemble the values Sx into T, typecasting as needed //------------------------------------------------------------------ if (do_burble) { GBURBLE ("(generic build with typecast) ") ; } // If T and Sx are iso, no typecasting is ever done, so this method // is not used in that case. ASSERT (!S_iso) ; // Sx (either S_work or S_input) must be permuted and copied into // T->x, since the tuples had to be sorted, or duplicates appear. // Any duplicates are now assembled. 
Not all of the 5 types are // the same, but all of them are built-in since user-defined types // cannot be typecasted. const GB_Type_code scode = stype->code ; const size_t ssize = stype->size ; GB_cast_function cast_S_to_T = GB_cast_factory (tcode, scode) ; GB_cast_function cast_S_to_Y = GB_cast_factory (ycode, scode) ; GB_cast_function cast_T_to_X = GB_cast_factory (xcode, tcode) ; GB_cast_function cast_Z_to_T = GB_cast_factory (tcode, zcode) ; ASSERT (scode <= GB_FC64_code) ; ASSERT (tcode <= GB_FC64_code) ; ASSERT (xcode <= GB_FC64_code) ; ASSERT (ycode <= GB_FC64_code) ; ASSERT (zcode <= GB_FC64_code) ; // Tx [p] = (ttype) Sx [k], with typecasting #undef GB_CAST_ARRAY_TO_ARRAY #define GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \ cast_S_to_T (Tx +((p)*tsize), Sx +((k)*ssize), ssize) ; if (op_2nd) { //-------------------------------------------------------------- // dup operator is the SECOND operator, with typecasting //-------------------------------------------------------------- // Tx [p] += (ttype) Sx [k], but 2nd op, with typecasting #undef GB_ADD_CAST_ARRAY_TO_ARRAY #define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \ GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) #include "GB_reduce_build_template.c" } else { //-------------------------------------------------------------- // dup is another operator, with typecasting required //-------------------------------------------------------------- // Tx [p] += Sx [k], with typecasting #undef GB_ADD_CAST_ARRAY_TO_ARRAY #define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \ { \ /* ywork = (ytype) Sx [k] */ \ GB_void ywork [GB_VLA(ysize)] ; \ cast_S_to_Y (ywork, Sx +((k)*ssize), ssize) ; \ /* xwork = (xtype) Tx [p] */ \ GB_void xwork [GB_VLA(xsize)] ; \ cast_T_to_X (xwork, Tx +((p)*tsize), tsize) ; \ /* zwork = f (xwork, ywork) */ \ GB_void zwork [GB_VLA(zsize)] ; \ fdup (zwork, xwork, ywork) ; \ /* Tx [tnz-1] = (ttype) zwork */ \ cast_Z_to_T (Tx +((p)*tsize), zwork, zsize) ; \ } #include "GB_reduce_build_template.c" } } } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORK ; T->jumbled = false ; ASSERT_MATRIX_OK (T, "T built", GB0) ; ASSERT (GB_IS_HYPERSPARSE (T)) ; return (GrB_SUCCESS) ; }
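//------------------------------------------------------------------------------
// illustrative sketch: stable assembly of (i,k) tuples
//------------------------------------------------------------------------------

// The following is a minimal, self-contained sketch of the I_work/K_work idea
// used above, under simplified assumptions: one thread, a 1-D problem, double
// values, and "SECOND" semantics for duplicates (the dup operator used by
// GB_wait).  It is not the library's method and is not called anywhere; the
// function name and its sequential insertion sort are purely for illustration.
// Kw must arrive as the identity permutation 0..n-1, mirroring K_work.

static int64_t toy_build_1d     // returns # of entries after assembly
(
    int64_t *Iw,        // size n: row indices (sorted in place)
    int64_t *Kw,        // size n: original positions, 0..n-1 on input
    const double *Xw,   // size n: values, in original (unsorted) order
    int64_t n,          // number of input tuples
    int64_t *Ti,        // size n: output indices
    double *Tx          // size n: output values
)
{
    // sort the (i,k) pairs by i with a stable insertion sort; because Kw is
    // unique and increases with input position, ties keep their input order
    for (int64_t p = 1 ; p < n ; p++)
    {
        int64_t i = Iw [p], k = Kw [p], q = p - 1 ;
        while (q >= 0 && Iw [q] > i)
        {
            Iw [q+1] = Iw [q] ; Kw [q+1] = Kw [q] ; q-- ;
        }
        Iw [q+1] = i ; Kw [q+1] = k ;
    }
    // assemble duplicates: values are fetched via Kw, so the last duplicate in
    // input order wins, which is the SECOND semantics
    int64_t tnz = -1, ilast = -1 ;
    for (int64_t p = 0 ; p < n ; p++)
    {
        if (Iw [p] != ilast) { Ti [++tnz] = Iw [p] ; }
        Tx [tnz] = Xw [Kw [p]] ;
        ilast = Iw [p] ;
    }
    return (tnz + 1) ;
}

// For example, Iw = [5,2,2,0], Kw = [0,1,2,3], Xw = [9,7,8,6] yields
// Ti = [0,2,5] and Tx = [6,8,9]: the later duplicate value 8 replaces 7.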
task_codegen.c
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -fopenmp-version=50 -x c -emit-llvm %s -o - | FileCheck %s // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -fopenmp-version=50 -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s // SIMD-ONLY0-NOT: {{__kmpc|__tgt}} // expected-no-diagnostics #ifndef HEADER #define HEADER typedef void *omp_depend_t; void foo(); // CHECK-LABEL: @main int main() { omp_depend_t d, x; int a; // CHECK: [[D_ADDR:%.+]] = alloca i8*, // CHECK: [[X_ADDR:%.+]] = alloca i8*, // CHECK: [[A_ADDR:%.+]] = alloca i32, // CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num( // CHECK: [[ALLOC:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @{{.+}}, i32 %0, i32 1, i64 40, i64 0, i32 (i32, i8*)* bitcast (i32 (i32, [[PRIVATES_TY:%.+]]*)* [[TASK_ENTRY:@.+]] to i32 (i32, i8*)*)) // CHECK: [[DATA:%.+]] = bitcast i8* [[ALLOC]] to [[PRIVATES_TY]]* // CHECK: [[D:%.+]] = load i8*, i8** [[D_ADDR]], // CHECK: [[D_DEP:%.+]] = bitcast i8* [[D]] to %struct.kmp_depend_info* // CHECK: [[D_DEP_BASE:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* [[D_DEP]], i{{.+}} -1 // CHECK: [[D_DEP_BASE_SIZE:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[D_DEP_BASE]], i{{.+}} 0, i{{.+}} 0 // CHECK: [[SIZE1:%.+]] = load i64, i64* [[D_DEP_BASE_SIZE]], // CHECK: [[SIZE:%.+]] = add nuw i64 0, [[SIZE1]] // CHECK: [[X:%.+]] = load i8*, i8** [[X_ADDR]], // CHECK: [[X_DEP:%.+]] = bitcast i8* [[X]] to %struct.kmp_depend_info* // CHECK: [[X_DEP_BASE:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* [[X_DEP]], i{{.+}} -1 // CHECK: [[X_DEP_BASE_SIZE:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[X_DEP_BASE]], i{{.+}} 0, i{{.+}} 0 // CHECK: [[SIZE2:%.+]] = load i64, i64* [[X_DEP_BASE_SIZE]], // CHECK: [[SIZE3:%.+]] = add nuw i64 [[SIZE]], [[SIZE2]] // CHECK: [[SIZE:%.+]] = add nuw i64 [[SIZE3]], 1 // CHECK: [[SIZE32:%.+]] = trunc i64 [[SIZE]] to i32 // CHECK: [[SIZE64:%.+]] = zext i32 [[SIZE32]] to i64 // CHECK: [[SV:%.+]] = call i8* @llvm.stacksave() // CHECK: store i8* [[SV]], i8** [[SV_ADDR:%.+]], // CHECK: [[VLA:%.+]] = alloca %struct.kmp_depend_info, i64 [[SIZE64]], // CHECK: [[VLA0:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA]], i64 0 // CHECK: [[BASE_ADDR:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA0]], i{{.+}} 0, i{{.+}} 0 // CHECK: [[A_ADDR_CAST:%.+]] = ptrtoint i32* [[A_ADDR]] to i64 // CHECK: store i64 [[A_ADDR_CAST]], i64* [[BASE_ADDR]], // CHECK: [[SIZE_ADDR:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA0]], i{{.+}} 0, i{{.+}} 1 // CHECK: store i64 4, i64* [[SIZE_ADDR]], // CHECK: [[FLAGS_ADDR:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA0]], i{{.+}} 0, i{{.+}} 2 // CHECK: store i8 1, i8* [[FLAGS_ADDR]], // CHECK: [[VLA_D:%.+]] = getelementptr 
%struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA]], i64 1 // CHECK: [[D_SIZE:%.+]] = mul nuw i64 24, [[SIZE1]] // CHECK: [[DEST:%.+]] = bitcast %struct.kmp_depend_info* [[VLA_D]] to i8* // CHECK: [[SRC:%.+]] = bitcast %struct.kmp_depend_info* [[D_DEP]] to i8* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i64 [[D_SIZE]], i1 false) // CHECK: [[VLA_X:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* %25, i64 [[SIZE1]] // CHECK: [[X_SIZE:%.+]] = mul nuw i64 24, [[SIZE2]] // CHECK: [[DEST:%.+]] = bitcast %struct.kmp_depend_info* [[VLA_X]] to i8* // CHECK: [[SRC:%.+]] = bitcast %struct.kmp_depend_info* [[X_DEP]] to i8* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i64 [[X_SIZE]], i1 false) // CHECK: [[BC:%.+]] = bitcast %struct.kmp_depend_info* [[VLA]] to i8* // CHECK: call i32 @__kmpc_omp_task_with_deps(%struct.ident_t* @{{.+}}, i32 [[GTID]], i8* [[ALLOC]], i32 [[SIZE32]], i8* [[BC]], i32 0, i8* null) // CHECK: [[SV:%.+]] = load i8*, i8** [[SV_ADDR]], // CHECK: call void @llvm.stackrestore(i8* [[SV]]) #pragma omp task depend(in: a) depend(depobj: d, x) { #pragma omp taskgroup { #pragma omp task foo(); } } // CHECK: ret i32 0 return 0; } // CHECK: call void @__kmpc_taskgroup( // CHECK: call i8* @__kmpc_omp_task_alloc( // CHECK: call i32 @__kmpc_omp_task( // CHECK: call void @__kmpc_end_taskgroup( // CHECK-LINE: @bar void bar() { // CHECK: call void @__kmpc_for_static_init_4( #pragma omp for for (int i = 0; i < 10; ++i) // CHECK: [[BUF:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @{{.+}}, i32 %{{.+}}, i32 1, i64 48, // CHECK: [[BC_BUF:%.+]] = bitcast i8* [[BUF]] to [[TT_WITH_PRIVS:%.+]]* // CHECK: [[PRIVS:%.+]] = getelementptr inbounds [[TT_WITH_PRIVS]], [[TT_WITH_PRIVS]]* [[BC_BUF]], i32 0, i32 1 // CHECK: [[I_PRIV:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}} [[PRIVS]], i32 0, i32 0 // CHECK: store i32 %{{.+}}, i32* [[I_PRIV]], // CHECK: = call i32 @__kmpc_omp_task(%struct.ident_t* @{{.+}}, i32 %{{.+}}, i8* [[BUF]]) #pragma omp task ++i; } #endif
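// Illustrative usage sketch (kept under "#if 0" so it does not perturb the
// FileCheck patterns above).  It shows how an omp_depend_t such as 'd' and 'x'
// in main() is normally initialized with the OpenMP 5.0 depobj construct
// before being consumed by depend(depobj: ...), and destroyed afterwards.
// The function name is hypothetical.
#if 0
void depobj_usage_sketch(void) {
  int a = 0;
  omp_depend_t d;
#pragma omp depobj(d) depend(inout: a) // package a dependence on 'a' into 'd'
#pragma omp task depend(depobj: d)     // equivalent to depend(inout: a)
  a += 1;
#pragma omp taskwait
#pragma omp depobj(d) destroy          // release the depend object
}
#endif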
tentusscher_epi_2004_S1_1.c
#include <assert.h> #include <stdlib.h> #include "tentusscher_epi_2004_S1_1.h" GET_CELL_MODEL_DATA(init_cell_model_data) { assert(cell_model); if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.7787928226268,0.00123339508649700,0.784831144233936,0.784673023102172,0.000169405106163081,0.487281523786458,0.00289654265697758,0.999998418745548,1.86681673058670e-08,1.83872100639159e-05,0.999777546403090,1.00731261455043,0.999997755681027,4.00467125306598e-05,0.953040239833913,9.39175391367938,139.965667493392}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = i; for (int j = 0; j < num_steps; ++j) { solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu(real dt, real *sv, real stim_current) { assert(sv); real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={13.7730247891532,0.000208550376791424,0.000166345602997405,0.000314427207496467,0.272150547490643,0.206045798160674,0.134878222351137,2.91860118931279,0.0222099400341836,2.12194476134155,1099.53480175178,0.000604923870766662,0.118384383617544,0.0193733747777405,0.00390066599158743,2.21704721596155e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total 
current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) 
    sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);

    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;

}
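// Illustrative note (not called by the model): every gate update in RHS_cpu
// follows the Rush-Larsen scheme, which integrates dg/dt = (g_inf - g)/tau
// exactly over one step dt when g_inf and tau are frozen at the current
// voltage:
//
//     g(t + dt) = g_inf - (g_inf - g(t)) * exp(-dt / tau)
//
// A minimal helper capturing that one line (the name is hypothetical):
static inline real rush_larsen_step(real g, real g_inf, real tau, real dt) {
    return g_inf - (g_inf - g) * exp(-dt / tau);
}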
segment.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % Cristy % % April 1993 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class is used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. % % o For each histogram, successively apply the scale-space filter and % build an interval tree of zero crossings in the second derivative % at each scale. Analyze this scale-space ``fingerprint'' to % determine which peaks and valleys in the histogram are most % predominant. % % o The fingerprint defines intervals on the axis of the histogram. % Each interval contains either a minima or a maxima in the original % signal. If each color component lies within the maxima interval, % that pixel is considered ``classified'' and is assigned an unique % class number. % % o Any pixel that fails to be classified in the above thresholding % pass is classified using the fuzzy c-Means technique. It is % assigned to one of the classes discovered in the histogram analysis % phase. % % The fuzzy c-Means technique attempts to cluster a pixel by finding % the local minima of the generalized within group sum of squared error % objective function. A pixel is assigned to the closest class of % which the fuzzy membership has a maximum value. % % Segment is strongly based on software written by Andy Gallo, % University of Delaware. % % The following reference was used in creating this program: % % Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation % Algorithm Based on the Thresholding and the Fuzzy c-Means % Techniques", Pattern Recognition, Volume 23, Number 9, pages % 935-952, 1990. 
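%
%  For reference, the fuzzy membership computed in Classify() below can be
%  written as
%
%      u_j(x) = 1 / sum_k ( d_j(x)^2 / d_k(x)^2 )^( 1/(m-1) )
%
%  where d_j(x) is the distance from pixel x to the center of class j and m
%  is the weighting exponent; an unclassified pixel is assigned to the class
%  j with the largest membership u_j(x).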
% % */ #include "magick/studio.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/string_.h" #include "magick/thread-private.h" /* Define declarations. */ #define MaxDimension 3 #define DeltaTau 0.5f #if defined(FastClassify) #define WeightingExponent 2.0 #define SegmentPower(ratio) (ratio) #else #define WeightingExponent 2.5 #define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0))); #endif #define Tau 5.2f /* Typedef declarations. */ typedef struct _ExtentPacket { MagickRealType center; ssize_t index, left, right; } ExtentPacket; typedef struct _Cluster { struct _Cluster *next; ExtentPacket red, green, blue; ssize_t count, id; } Cluster; typedef struct _IntervalTree { MagickRealType tau; ssize_t left, right; MagickRealType mean_stability, stability; struct _IntervalTree *sibling, *child; } IntervalTree; typedef struct _ZeroCrossing { MagickRealType tau, histogram[256]; short crossings[256]; } ZeroCrossing; /* Constant declarations. */ static const int Blue = 2, Green = 1, Red = 0, SafeMargin = 3, TreeLength = 600; /* Method prototypes. */ static MagickRealType OptimalTau(const ssize_t *,const double,const double,const double, const double,short *); static ssize_t DefineRegion(const short *,ExtentPacket *); static void FreeNodes(IntervalTree *), InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *), ScaleSpace(const ssize_t *,const MagickRealType,MagickRealType *), ZeroCrossHistogram(MagickRealType *,const MagickRealType,short *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Classify() defines one or more classes. Each pixel is thresholded to % determine which class it belongs to. If the class is not identified it is % assigned to the closest class based on the fuzzy c-Means technique. % % The format of the Classify method is: % % MagickBooleanType Classify(Image *image,short **extrema, % const MagickRealType cluster_threshold, % const MagickRealType weighting_exponent, % const MagickBooleanType verbose) % % A description of each parameter follows. % % o image: the image. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o cluster_threshold: This MagickRealType represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o weighting_exponent: Specifies the membership weighting exponent. % % o verbose: A value greater than zero prints detailed information about % the identified classes. 
% */ static MagickBooleanType Classify(Image *image,short **extrema, const MagickRealType cluster_threshold, const MagickRealType weighting_exponent,const MagickBooleanType verbose) { #define SegmentImageTag "Segment/Image" #define ThrowClassifyException(severity,tag,label) \ {\ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) \ { \ next_cluster=cluster->next; \ cluster=(Cluster *) RelinquishMagickMemory(cluster); \ } \ if (squares != (double *) NULL) \ { \ squares-=255; \ free_squares=squares; \ free_squares=(double *) RelinquishMagickMemory(free_squares); \ } \ ThrowBinaryException(severity,tag,label); \ } CacheView *image_view; Cluster *cluster, *head, *last_cluster, *next_cluster; ExceptionInfo *exception; ExtentPacket blue, green, red; MagickOffsetType progress; MagickRealType *free_squares; MagickStatusType status; register ssize_t i; register MagickRealType *squares; size_t number_clusters; ssize_t count, y; /* Form clusters. */ cluster=(Cluster *) NULL; head=(Cluster *) NULL; squares=(double *) NULL; (void) memset(&red,0,sizeof(red)); (void) memset(&green,0,sizeof(green)); (void) memset(&blue,0,sizeof(blue)); exception=(&image->exception); while (DefineRegion(extrema[Red],&red) != 0) { green.index=0; while (DefineRegion(extrema[Green],&green) != 0) { blue.index=0; while (DefineRegion(extrema[Blue],&blue) != 0) { /* Allocate a new class. */ if (head != (Cluster *) NULL) { cluster->next=(Cluster *) AcquireMagickMemory( sizeof(*cluster->next)); cluster=cluster->next; } else { cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); head=cluster; } if (cluster == (Cluster *) NULL) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; } } } if (head == (Cluster *) NULL) { /* No classes were identified-- create one. */ cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); if (cluster == (Cluster *) NULL) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; head=cluster; } /* Count the pixels for each cluster. */ status=MagickTrue; count=0; progress=0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <= (cluster->blue.right+SafeMargin))) { /* Count this pixel. 
*/ count++; cluster->red.center+=(MagickRealType) ScaleQuantumToChar(GetPixelRed(p)); cluster->green.center+=(MagickRealType) ScaleQuantumToChar(GetPixelGreen(p)); cluster->blue.center+=(MagickRealType) ScaleQuantumToChar(GetPixelBlue(p)); cluster->count++; break; } p++; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); /* Remove clusters that do not meet minimum cluster threshold. */ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. */ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. */ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } number_clusters=(size_t) count; if (verbose != MagickFalse) { /* Print cluster statistics. */ (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n"); (void) FormatLocaleFile(stdout,"===================\n\n"); (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double) cluster_threshold); (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double) weighting_exponent); (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n", (double) number_clusters); /* Print the total number of points per cluster. */ (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n"); (void) FormatLocaleFile(stdout,"=============================\n\n"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double) cluster->id,(double) cluster->count); /* Print the cluster extents. */ (void) FormatLocaleFile(stdout, "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(stdout,"================"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(stdout, "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double) cluster->red.left,(double) cluster->red.right,(double) cluster->green.left,(double) cluster->green.right,(double) cluster->blue.left,(double) cluster->blue.right); } /* Print the cluster center values. */ (void) FormatLocaleFile(stdout, "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(stdout,"====================="); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(stdout,"%g %g %g\n",(double) cluster->red.center,(double) cluster->green.center,(double) cluster->blue.center); } (void) FormatLocaleFile(stdout,"\n"); } if (number_clusters > 256) ThrowClassifyException(ImageError,"TooManyClusters",image->filename); /* Speed up distance calculations. 
*/ squares=(MagickRealType *) AcquireQuantumMemory(513UL,sizeof(*squares)); if (squares == (MagickRealType *) NULL) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); squares+=255; for (i=(-255); i <= 255; i++) squares[i]=(MagickRealType) i*(MagickRealType) i; /* Allocate image colormap. */ if (AcquireImageColormap(image,number_clusters) == MagickFalse) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); i=0; for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { image->colormap[i].red=ScaleCharToQuantum((unsigned char) (cluster->red.center+0.5)); image->colormap[i].green=ScaleCharToQuantum((unsigned char) (cluster->green.center+0.5)); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) (cluster->blue.center+0.5)); i++; } /* Do course grain classes. */ exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Cluster *cluster; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(indexes+x,0); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { if (((ssize_t) ScaleQuantumToChar(q->red) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->red) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->green) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->green) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->blue) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->blue) <= (cluster->blue.right+SafeMargin))) { /* Classify this pixel. */ SetPixelIndex(indexes+x,cluster->id); break; } } if (cluster == (Cluster *) NULL) { MagickRealType distance_squared, local_minima, numerator, ratio, sum; register ssize_t j, k; /* Compute fuzzy membership. */ local_minima=0.0; for (j=0; j < (ssize_t) image->colors; j++) { sum=0.0; p=image->colormap+j; distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)- (ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+ squares[(ssize_t) ScaleQuantumToChar(q->green)- (ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+ squares[(ssize_t) ScaleQuantumToChar(q->blue)- (ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]; numerator=distance_squared; for (k=0; k < (ssize_t) image->colors; k++) { p=image->colormap+k; distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)- (ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+ squares[(ssize_t) ScaleQuantumToChar(q->green)- (ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+ squares[(ssize_t) ScaleQuantumToChar(q->blue)- (ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]; ratio=numerator/distance_squared; sum+=SegmentPower(ratio); } if ((sum != 0.0) && ((1.0/sum) > local_minima)) { /* Classify this pixel. 
*/ local_minima=1.0/sum; SetPixelIndex(indexes+x,j); } } } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); status&=SyncImage(image); /* Relinquish resources. */ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; cluster=(Cluster *) RelinquishMagickMemory(cluster); } squares-=255; free_squares=squares; free_squares=(MagickRealType *) RelinquishMagickMemory(free_squares); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n s o l i d a t e C r o s s i n g s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConsolidateCrossings() guarantees that an even number of zero crossings % always lie between two crossings. % % The format of the ConsolidateCrossings method is: % % ConsolidateCrossings(ZeroCrossing *zero_crossing, % const size_t number_crossings) % % A description of each parameter follows. % % o zero_crossing: Specifies an array of structures of type ZeroCrossing. % % o number_crossings: This size_t specifies the number of elements % in the zero_crossing array. % */ static void ConsolidateCrossings(ZeroCrossing *zero_crossing, const size_t number_crossings) { register ssize_t i, j, k, l; ssize_t center, correct, count, left, right; /* Consolidate zero crossings. */ for (i=(ssize_t) number_crossings-1; i >= 0; i--) for (j=0; j <= 255; j++) { if (zero_crossing[i].crossings[j] == 0) continue; /* Find the entry that is closest to j and still preserves the property that there are an even number of crossings between intervals. */ for (k=j-1; k > 0; k--) if (zero_crossing[i+1].crossings[k] != 0) break; left=MagickMax(k,0); center=j; for (k=j+1; k < 255; k++) if (zero_crossing[i+1].crossings[k] != 0) break; right=MagickMin(k,255); /* K is the zero crossing just left of j. */ for (k=j-1; k > 0; k--) if (zero_crossing[i].crossings[k] != 0) break; if (k < 0) k=0; /* Check center for an even number of crossings between k and j. */ correct=(-1); if (zero_crossing[i+1].crossings[j] != 0) { count=0; for (l=k+1; l < center; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (center != k)) correct=center; } /* Check left for an even number of crossings between k and j. */ if (correct == -1) { count=0; for (l=k+1; l < left; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (left != k)) correct=left; } /* Check right for an even number of crossings between k and j. */ if (correct == -1) { count=0; for (l=k+1; l < right; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (right != k)) correct=right; } l=(ssize_t) zero_crossing[i].crossings[j]; zero_crossing[i].crossings[j]=0; if (correct != -1) zero_crossing[i].crossings[correct]=(short) l; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e f i n e R e g i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DefineRegion() defines the left and right boundaries of a peak region. 
% % The format of the DefineRegion method is: % % ssize_t DefineRegion(const short *extrema,ExtentPacket *extents) % % A description of each parameter follows. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o extents: This pointer to an ExtentPacket represent the extends % of a particular peak or valley of a color component. % */ static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents) { /* Initialize to default values. */ extents->left=0; extents->center=0.0; extents->right=255; /* Find the left side (maxima). */ for ( ; extents->index <= 255; extents->index++) if (extrema[extents->index] > 0) break; if (extents->index > 255) return(MagickFalse); /* no left side - no region exists */ extents->left=extents->index; /* Find the right side (minima). */ for ( ; extents->index <= 255; extents->index++) if (extrema[extents->index] < 0) break; extents->right=extents->index-1; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e r i v a t i v e H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DerivativeHistogram() determines the derivative of the histogram using % central differencing. % % The format of the DerivativeHistogram method is: % % DerivativeHistogram(const MagickRealType *histogram, % MagickRealType *derivative) % % A description of each parameter follows. % % o histogram: Specifies an array of MagickRealTypes representing the number % of pixels for each intensity of a particular color component. % % o derivative: This array of MagickRealTypes is initialized by % DerivativeHistogram to the derivative of the histogram using central % differencing. % */ static void DerivativeHistogram(const MagickRealType *histogram, MagickRealType *derivative) { register ssize_t i, n; /* Compute endpoints using second order polynomial interpolation. */ n=255; derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]); derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]); /* Compute derivative using central differencing. */ for (i=1; i < n; i++) derivative[i]=(histogram[i+1]-histogram[i-1])/2.0; return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e D y n a m i c T h r e s h o l d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDynamicThreshold() returns the dynamic threshold for an image. % % The format of the GetImageDynamicThreshold method is: % % MagickBooleanType GetImageDynamicThreshold(const Image *image, % const double cluster_threshold,const double smooth_threshold, % MagickPixelPacket *pixel,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o cluster_threshold: This MagickRealType represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o smooth_threshold: the smoothing threshold eliminates noise in the second % derivative of the histogram. As the value is increased, you can expect a % smoother second derivative. % % o pixel: return the dynamic threshold here. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image, const double cluster_threshold,const double smooth_threshold, MagickPixelPacket *pixel,ExceptionInfo *exception) { Cluster *background, *cluster, *object, *head, *last_cluster, *next_cluster; ExtentPacket blue, green, red; MagickBooleanType proceed; MagickRealType threshold; register const PixelPacket *p; register ssize_t i, x; short *extrema[MaxDimension]; ssize_t count, *histogram[MaxDimension], y; /* Allocate histogram and extrema. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); GetMagickPixelPacket(image,pixel); for (i=0; i < MaxDimension; i++) { histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram)); extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram)); if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL)) { for (i-- ; i >= 0; i--) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } } /* Initialize histogram. */ InitializeHistogram(image,histogram,exception); (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]); (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]); (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau, (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]); /* Form clusters. */ cluster=(Cluster *) NULL; head=(Cluster *) NULL; (void) memset(&red,0,sizeof(red)); (void) memset(&green,0,sizeof(green)); (void) memset(&blue,0,sizeof(blue)); while (DefineRegion(extrema[Red],&red) != 0) { green.index=0; while (DefineRegion(extrema[Green],&green) != 0) { blue.index=0; while (DefineRegion(extrema[Blue],&blue) != 0) { /* Allocate a new class. */ if (head != (Cluster *) NULL) { cluster->next=(Cluster *) AcquireMagickMemory( sizeof(*cluster->next)); cluster=cluster->next; } else { cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); head=cluster; } if (cluster == (Cluster *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); return(MagickFalse); } /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; } } } if (head == (Cluster *) NULL) { /* No classes were identified-- create one. */ cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); if (cluster == (Cluster *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return(MagickFalse); } /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; head=cluster; } /* Count the pixels for each cluster. 
*/ count=0; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <= (cluster->blue.right+SafeMargin))) { /* Count this pixel. */ count++; cluster->red.center+=(MagickRealType) ScaleQuantumToChar(GetPixelRed(p)); cluster->green.center+=(MagickRealType) ScaleQuantumToChar(GetPixelGreen(p)); cluster->blue.center+=(MagickRealType) ScaleQuantumToChar(GetPixelBlue(p)); cluster->count++; break; } p++; } proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y, 2*image->rows); if (proceed == MagickFalse) break; } /* Remove clusters that do not meet minimum cluster threshold. */ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. */ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. */ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } object=head; background=head; if (count > 1) { object=head->next; for (cluster=object; cluster->next != (Cluster *) NULL; ) { if (cluster->count < object->count) object=cluster; cluster=cluster->next; } background=head->next; for (cluster=background; cluster->next != (Cluster *) NULL; ) { if (cluster->count > background->count) background=cluster; cluster=cluster->next; } } if (background != (Cluster *) NULL) { threshold=(background->red.center+object->red.center)/2.0; pixel->red=(MagickRealType) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->green.center+object->green.center)/2.0; pixel->green=(MagickRealType) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->blue.center+object->blue.center)/2.0; pixel->blue=(MagickRealType) ScaleCharToQuantum((unsigned char) (threshold+0.5)); } /* Relinquish resources. */ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; cluster=(Cluster *) RelinquishMagickMemory(cluster); } for (i=0; i < MaxDimension; i++) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I n i t i a l i z e H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeHistogram() computes the histogram for an image. % % The format of the InitializeHistogram method is: % % InitializeHistogram(const Image *image,ssize_t **histogram) % % A description of each parameter follows. 
% % o image: Specifies a pointer to an Image structure; returned from % ReadImage. % % o histogram: Specifies an array of integers representing the number % of pixels for each intensity of a particular color component. % */ static void InitializeHistogram(const Image *image,ssize_t **histogram, ExceptionInfo *exception) { register const PixelPacket *p; register ssize_t i, x; ssize_t y; /* Initialize histogram. */ for (i=0; i <= 255; i++) { histogram[Red][i]=0; histogram[Green][i]=0; histogram[Blue][i]=0; } for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]++; histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]++; histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]++; p++; } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I n i t i a l i z e I n t e r v a l T r e e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeIntervalTree() initializes an interval tree from the lists of % zero crossings. % % The format of the InitializeIntervalTree method is: % % InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes, % IntervalTree *node) % % A description of each parameter follows. % % o zero_crossing: Specifies an array of structures of type ZeroCrossing. % % o number_crossings: This size_t specifies the number of elements % in the zero_crossing array. % */ static void InitializeList(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->child == (IntervalTree *) NULL) list[(*number_nodes)++]=node; InitializeList(list,number_nodes,node->sibling); InitializeList(list,number_nodes,node->child); } static void MeanStability(IntervalTree *node) { register IntervalTree *child; if (node == (IntervalTree *) NULL) return; node->mean_stability=0.0; child=node->child; if (child != (IntervalTree *) NULL) { register ssize_t count; register MagickRealType sum; sum=0.0; count=0; for ( ; child != (IntervalTree *) NULL; child=child->sibling) { sum+=child->stability; count++; } node->mean_stability=sum/(MagickRealType) count; } MeanStability(node->sibling); MeanStability(node->child); } static void Stability(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->child == (IntervalTree *) NULL) node->stability=0.0; else node->stability=node->tau-(node->child)->tau; Stability(node->sibling); Stability(node->child); } static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing, const size_t number_crossings) { IntervalTree *head, **list, *node, *root; register ssize_t i; ssize_t j, k, left, number_nodes; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return((IntervalTree *) NULL); /* The root is the entire histogram. */ root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root)); root->child=(IntervalTree *) NULL; root->sibling=(IntervalTree *) NULL; root->tau=0.0; root->left=0; root->right=255; root->mean_stability=0.0; root->stability=0.0; (void) memset(list,0,TreeLength*sizeof(*list)); for (i=(-1); i < (ssize_t) number_crossings; i++) { /* Initialize list with all nodes with no children. */ number_nodes=0; InitializeList(list,&number_nodes,root); /* Split list. 
*/ for (j=0; j < number_nodes; j++) { head=list[j]; left=head->left; node=head; for (k=head->left+1; k < head->right; k++) { if (zero_crossing[i+1].crossings[k] != 0) { if (node == head) { node->child=(IntervalTree *) AcquireMagickMemory( sizeof(*node->child)); node=node->child; } else { node->sibling=(IntervalTree *) AcquireMagickMemory( sizeof(*node->sibling)); node=node->sibling; } if (node == (IntervalTree *) NULL) { list=(IntervalTree **) RelinquishMagickMemory(list); FreeNodes(root); return((IntervalTree *) NULL); } node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=k; left=k; } } if (left != head->left) { node->sibling=(IntervalTree *) AcquireMagickMemory( sizeof(*node->sibling)); node=node->sibling; if (node == (IntervalTree *) NULL) { list=(IntervalTree **) RelinquishMagickMemory(list); FreeNodes(root); return((IntervalTree *) NULL); } node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=head->right; } } } /* Determine the stability: difference between a nodes tau and its child. */ Stability(root->child); MeanStability(root->child); list=(IntervalTree **) RelinquishMagickMemory(list); return(root); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p t i m a l T a u % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OptimalTau() finds the optimal tau for each band of the histogram. % % The format of the OptimalTau method is: % % MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau, % const double min_tau,const double delta_tau, % const double smooth_threshold,short *extrema) % % A description of each parameter follows. % % o histogram: Specifies an array of integers representing the number % of pixels for each intensity of a particular color component. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % */ static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->stability >= node->mean_stability) { list[(*number_nodes)++]=node; ActiveNodes(list,number_nodes,node->sibling); } else { ActiveNodes(list,number_nodes,node->sibling); ActiveNodes(list,number_nodes,node->child); } } static void FreeNodes(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; FreeNodes(node->sibling); FreeNodes(node->child); node=(IntervalTree *) RelinquishMagickMemory(node); } static MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau, const double min_tau,const double delta_tau,const double smooth_threshold, short *extrema) { IntervalTree **list, *node, *root; MagickBooleanType peak; MagickRealType average_tau, *derivative, *second_derivative, tau, value; register ssize_t i, x; size_t count, number_crossings; ssize_t index, j, k, number_nodes; ZeroCrossing *zero_crossing; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return(0.0); /* Allocate zero crossing list. 
*/ count=(size_t) ((max_tau-min_tau)/delta_tau)+2; zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count, sizeof(*zero_crossing)); if (zero_crossing == (ZeroCrossing *) NULL) { list=(IntervalTree **) RelinquishMagickMemory(list); return(0.0); } for (i=0; i < (ssize_t) count; i++) zero_crossing[i].tau=(-1.0); /* Initialize zero crossing list. */ derivative=(MagickRealType *) AcquireCriticalMemory(256*sizeof(*derivative)); second_derivative=(MagickRealType *) AcquireCriticalMemory(256* sizeof(*second_derivative)); i=0; for (tau=max_tau; tau >= min_tau; tau-=delta_tau) { zero_crossing[i].tau=tau; ScaleSpace(histogram,tau,zero_crossing[i].histogram); DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); i++; } /* Add an entry for the original histogram. */ zero_crossing[i].tau=0.0; for (j=0; j <= 255; j++) zero_crossing[i].histogram[j]=(MagickRealType) histogram[j]; DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); number_crossings=(size_t) i; derivative=(MagickRealType *) RelinquishMagickMemory(derivative); second_derivative=(MagickRealType *) RelinquishMagickMemory(second_derivative); /* Ensure the scale-space fingerprints form lines in scale-space, not loops. */ ConsolidateCrossings(zero_crossing,number_crossings); /* Force endpoints to be included in the interval. */ for (i=0; i <= (ssize_t) number_crossings; i++) { for (j=0; j < 255; j++) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]); for (j=255; j > 0; j--) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]); } /* Initialize interval tree. */ root=InitializeIntervalTree(zero_crossing,number_crossings); if (root == (IntervalTree *) NULL) { zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(0.0); } /* Find active nodes: stability is greater (or equal) to the mean stability of its children. */ number_nodes=0; ActiveNodes(list,&number_nodes,root->child); /* Initialize extrema. */ for (i=0; i <= 255; i++) extrema[i]=0; for (i=0; i < number_nodes; i++) { /* Find this tau in zero crossings list. */ k=0; node=list[i]; for (j=0; j <= (ssize_t) number_crossings; j++) if (zero_crossing[j].tau == node->tau) k=j; /* Find the value of the peak. */ peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue : MagickFalse; index=node->left; value=zero_crossing[k].histogram[index]; for (x=node->left; x <= node->right; x++) { if (peak != MagickFalse) { if (zero_crossing[k].histogram[x] > value) { value=zero_crossing[k].histogram[x]; index=x; } } else if (zero_crossing[k].histogram[x] < value) { value=zero_crossing[k].histogram[x]; index=x; } } for (x=node->left; x <= node->right; x++) { if (index == 0) index=256; if (peak != MagickFalse) extrema[x]=(short) index; else extrema[x]=(short) (-index); } } /* Determine the average tau. */ average_tau=0.0; for (i=0; i < number_nodes; i++) average_tau+=list[i]->tau; average_tau/=(MagickRealType) number_nodes; /* Relinquish resources. 
*/ FreeNodes(root); zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(average_tau); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S c a l e S p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleSpace() performs a scale-space filter on the 1D histogram. % % The format of the ScaleSpace method is: % % ScaleSpace(const ssize_t *histogram,const MagickRealType tau, % MagickRealType *scale_histogram) % % A description of each parameter follows. % % o histogram: Specifies an array of MagickRealTypes representing the number % of pixels for each intensity of a particular color component. % */ static void ScaleSpace(const ssize_t *histogram,const MagickRealType tau, MagickRealType *scale_histogram) { double alpha, beta, *gamma, sum; register ssize_t u, x; gamma=(double *) AcquireQuantumMemory(256,sizeof(*gamma)); if (gamma == (double *) NULL) ThrowFatalException(ResourceLimitFatalError,"UnableToAllocateGammaMap"); alpha=1.0/(tau*sqrt(2.0*MagickPI)); beta=(-1.0/(2.0*tau*tau)); for (x=0; x <= 255; x++) gamma[x]=0.0; for (x=0; x <= 255; x++) { gamma[x]=exp((double) beta*x*x); if (gamma[x] < MagickEpsilon) break; } for (x=0; x <= 255; x++) { sum=0.0; for (u=0; u <= 255; u++) sum+=(double) histogram[u]*gamma[MagickAbsoluteValue(x-u)]; scale_histogram[x]=(MagickRealType) (alpha*sum); } gamma=(double *) RelinquishMagickMemory(gamma); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e g m e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SegmentImage() segment an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % C-means technique. % % The format of the SegmentImage method is: % % MagickBooleanType SegmentImage(Image *image, % const ColorspaceType colorspace,const MagickBooleanType verbose, % const double cluster_threshold,const double smooth_threshold) % % A description of each parameter follows. % % o image: the image. % % o colorspace: Indicate the colorspace. % % o verbose: Set to MagickTrue to print detailed information about the % identified classes. % % o cluster_threshold: This represents the minimum number of pixels % contained in a hexahedra before it can be considered valid (expressed % as a percentage). % % o smooth_threshold: the smoothing threshold eliminates noise in the second % derivative of the histogram. As the value is increased, you can expect a % smoother second derivative. % */ MagickExport MagickBooleanType SegmentImage(Image *image, const ColorspaceType colorspace,const MagickBooleanType verbose, const double cluster_threshold,const double smooth_threshold) { ColorspaceType previous_colorspace; MagickBooleanType status; register ssize_t i; short *extrema[MaxDimension]; ssize_t *histogram[MaxDimension]; /* Allocate histogram and extrema. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); for (i=0; i < MaxDimension; i++) { histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram)); extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema)); if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL)) { for (i-- ; i >= 0; i--) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", image->filename) } } /* Initialize histogram. */ previous_colorspace=image->colorspace; (void) TransformImageColorspace(image,colorspace); InitializeHistogram(image,histogram,&image->exception); (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau, smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]); (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau, smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]); (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau, smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]); /* Classify using the fuzzy c-Means technique. */ status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose); (void) TransformImageColorspace(image,previous_colorspace); /* Relinquish resources. */ for (i=0; i < MaxDimension; i++) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Z e r o C r o s s H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ZeroCrossHistogram() find the zero crossings in a histogram and marks % directions as: 1 is negative to positive; 0 is zero crossing; and -1 % is positive to negative. % % The format of the ZeroCrossHistogram method is: % % ZeroCrossHistogram(MagickRealType *second_derivative, % const MagickRealType smooth_threshold,short *crossings) % % A description of each parameter follows. % % o second_derivative: Specifies an array of MagickRealTypes representing the % second derivative of the histogram of a particular color component. % % o crossings: This array of integers is initialized with % -1, 0, or 1 representing the slope of the first derivative of the % of a particular color component. % */ static void ZeroCrossHistogram(MagickRealType *second_derivative, const MagickRealType smooth_threshold,short *crossings) { register ssize_t i; ssize_t parity; /* Merge low numbers to zero to help prevent noise. */ for (i=0; i <= 255; i++) if ((second_derivative[i] < smooth_threshold) && (second_derivative[i] >= -smooth_threshold)) second_derivative[i]=0.0; /* Mark zero crossings. */ parity=0; for (i=0; i <= 255; i++) { crossings[i]=0; if (second_derivative[i] < 0.0) { if (parity > 0) crossings[i]=(-1); parity=1; } else if (second_derivative[i] > 0.0) { if (parity < 0) crossings[i]=1; parity=(-1); } } }
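/*
  Illustrative usage sketch (not part of the original MagickCore source): a
  minimal, hypothetical driver showing how the SegmentImage() entry point
  defined above might be invoked after reading an image.  The helper name
  SegmentExample and the threshold values are assumptions chosen purely for
  illustration; the surrounding MagickCore calls (AcquireExceptionInfo,
  CloneImageInfo, ReadImage, DestroyImage, DestroyImageInfo,
  DestroyExceptionInfo) are the standard ImageMagick 6 API used elsewhere in
  this library.  The sketch is guarded with #if 0 so it has no effect on the
  build.
*/
#if 0
static MagickBooleanType SegmentExample(const char *path)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  /*
    Read the image named by path.
  */
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,path,MaxTextExtent);
  image=ReadImage(image_info,exception);
  status=MagickFalse;
  if (image != (Image *) NULL)
    {
      /*
        Segment in sRGB with a 1% cluster threshold and a smoothing threshold
        of 1.5 (illustrative values only); the clusters replace the image
        colormap in place.
      */
      status=SegmentImage(image,sRGBColorspace,MagickFalse,1.0,1.5);
      image=DestroyImage(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  return(status);
}
#endif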
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class AttributeList; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef 
MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. 
struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///\brief Source of additional semantic information. ExternalSemaSource *ExternalSource; ///\brief Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// \brief Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// \brief Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// \brief Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// \brief Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. 
SmallVector<Scope*, 2> CurrentSEHFinally; /// \brief Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// \brief pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. 
struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// \brief This represents the stack of attributes that were pushed by /// \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; AttributeList *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; SmallVector<PragmaAttributeEntry, 2> PragmaAttributeStack; /// \brief The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// \brief This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// \brief Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// \brief Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; /// \brief Stack containing information about each of the nested /// function, block, and method scopes that are currently active. /// /// This array is never empty. Clients should ignore the first /// element, which is used to cache a single FunctionScopeInfo /// that's used to parse every top-level function. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. 
ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType; /// \brief Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// \brief Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// \brief Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// \brief Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// \brief All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// \brief The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// \brief All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// \brief All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedExceptionSpecChecks; /// \brief All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// \brief Callback to the parser to parse templated functions when needed. 
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// \brief The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// \brief RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// \brief Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// \brief The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// \brief The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// \brief The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// \brief The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// \brief The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// \brief Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// \brief The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// \brief The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// \brief Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// \brief Pointer to NSValue type (NSValue *). 
QualType NSValuePointer; /// \brief The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// \brief The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// \brief Pointer to NSString type (NSString *). QualType NSStringPointer; /// \brief The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// \brief The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// \brief The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// \brief The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// \brief The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// \brief id<NSCopying> type. QualType QIDNSCopying; /// \brief will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// \brief Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// \brief The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// \brief The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// \brief The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// \brief The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// \brief The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// \brief The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// \brief The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. 
PotentiallyEvaluatedIfUsed }; /// \brief Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// \brief The expression evaluation context. ExpressionEvaluationContext Context; /// \brief Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// \brief Whether we are in a decltype expression. bool IsDecltype; /// \brief The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// \brief The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs; /// \brief The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// \brief The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// \brief The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. std::unique_ptr<MangleNumberingContext> MangleNumbering; /// \brief If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// \brief If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, bool IsDecltype) : Context(Context), ParentCleanup(ParentCleanup), IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering() { } /// \brief Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// \brief Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. 
class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// \brief A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// \brief A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// \brief The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// \brief The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// \brief A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). 
/// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// \brief Cause the active diagnostic on the DiagnosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. class FPContractStateRAII { public: FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {} ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; } private: Sema& S; FPOptions OldFPFeaturesState; }; void addImplicitTypedef(StringRef Name, QualType T); public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// \brief Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///\brief Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// \brief Helper class that creates diagnostics with optional /// template instantiation stacks.
/// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case anyway. SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// \brief Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// \brief Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// \brief Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// \brief Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// \brief Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// \brief Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// \brief This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, const BlockExpr *blkExpr = nullptr); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const { if (FunctionScopes.empty()) return nullptr; for (int e = FunctionScopes.size()-1; e >= 0; --e) { if (isa<sema::BlockScopeInfo>(FunctionScopes[e])) continue; return FunctionScopes[e]; } return nullptr; } template <typename ExprT> void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) { if (!isUnevaluatedContext()) getCurFunction()->recordUseOfWeak(E, IsRead); } void PushCompoundScope(); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// \brief Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// \brief Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// \brief Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// \brief Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. 
/// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T, TypeSourceInfo *ReturnTypeInfo); /// \brief Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// \brief The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// \brief Abstract class used to diagnose incomplete types. 
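/// Callers rarely implement this interface by hand; the variadic
/// RequireCompleteType overload declared further below wraps a diagnostic ID
/// and its extra arguments in a BoundTypeDiagnoser. A rough sketch, where
/// DiagID and Range stand in for a real diagnostic ID and source range:
/// \code
///   // True (and DiagID is emitted with Range and T attached) if T is not
///   // a complete type at Loc.
///   if (RequireCompleteType(Loc, T, DiagID, Range))
///     return true;
/// \endcode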
struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, llvm::index_sequence_for<Ts...>()); DB << T; } }; private: bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { clang::Module *Module = nullptr; bool ModuleInterface = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// \brief Get the module owning an entity. Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// \brief Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M) { return VisibleModules.isVisible(M); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. 
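/// A rough sketch of the intended pattern; UseLoc and Def are placeholders,
/// while the members used are the ones declared in this class:
/// \code
///   NamedDecl *Suggested = nullptr;
///   if (!hasVisibleDefinition(Def, &Suggested) && Suggested)
///     diagnoseMissingImport(UseLoc, Suggested,
///                           MissingImportKind::Definition);
/// \endcode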
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. 
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// \brief Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; default: llvm_unreachable("unsupported name classification."); } } }; /// \brief Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. 
/// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. 
/// Warn if it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation
DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// \brief Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// \brief Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// \brief Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// \brief Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// \brief Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, AttributeList *AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' Partition, ///< 'module partition X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. 
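/// For reference, the ModuleDeclKind values above correspond to these source
/// forms (M is a placeholder module name):
/// \code
///   export module M;       // ModuleDeclKind::Interface
///   module M;              // ModuleDeclKind::Implementation
///   module partition M;    // ModuleDeclKind::Partition
/// \endcode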
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path); /// \brief The parser has processed a module import declaration. /// /// \param AtLoc The location of the '@' symbol, if any. /// /// \param ImportLoc The location of the 'import' keyword. /// /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc, ModuleIdPath Path); /// \brief The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// \brief The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// \brief The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// \brief Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// \brief Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// \brief We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// \brief We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// \brief Retrieve a suitable printing policy. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// \brief Retrieve a suitable printing policy. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, AttributeList *MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> 
&AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// \brief Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// \brief Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool EnumUnderlyingIsImplicit, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, AttributeList *Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, AttributeList *Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. 
void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// \brief Make the given externally-produced declaration visible at the /// top level scope. /// /// \param D The externally-produced declaration to push. /// /// \param Name The name of the externally-produced declaration. void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// \brief Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// \brief Don't merge availability attributes at all. AMK_None, /// \brief Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// \brief Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// \brief Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Attribute merging methods. Return true if a new attribute was added. 
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); CommonAttr *mergeCommonAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. 
Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); /// \brief Checks availability of the function depending on the current /// function context.Inside an unavailable function,unavailability is ignored. /// /// \returns true if \p FD is unavailable and current context is inside /// an available function, false otherwise. bool isFunctionConsideredUnavailable(FunctionDecl *FD); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. 
CCEK_ConstexprIf ///< Condition in a constexpr if statement. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// \brief Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// \brief Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// \brief Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. 
enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}); void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = 
SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. /// /// \param AllowTopLevelCond Whether to allow the result to be the /// complete top-level condition. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond, bool AllowTopLevelCond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
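/// 
/// A minimal usage sketch (illustrative only; \c FD and \c Loc stand for a 
/// FunctionDecl and a SourceLocation already available at the call site): 
/// \code 
///   if (!checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true, Loc)) 
///     return ExprError(); // taking the address of FD is not allowed here 
/// \endcode 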
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// @brief Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. 
LookupObjCImplicitSelfParam, /// \brief Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// \brief Look up any declaration with any name. LookupAnyName }; /// \brief Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// \brief The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// \brief The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// \brief The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// \brief The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// \brief The lookup resulted in an error. LOLR_Error, /// \brief The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// \brief The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// \brief The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// \brief The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// \brief The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// \brief The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// \brief Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // \brief The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. 
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// \brief Whether we have already loaded known namespaces from an extenal /// source. bool LoadedExternalKnownNamespaces; /// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// \brief Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// \brief Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// \brief Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. 
void ProcessDeclAttributeDelayed(Decl *D, const AttributeList *AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const AttributeList *AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const AttributeList &attr, unsigned &value); bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckNoReturnAttr(const AttributeList &attr); bool CheckNoCallerSavedRegsAttr(const AttributeList &attr); bool checkStringLiteralArgumentAttr(const AttributeList &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType &T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type. /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param nullabilityLoc The location of the nullability specifier. /// /// \param isContextSensitive Whether this nullability specifier was /// written as a context-sensitive keyword (in an Objective-C /// method) or an Objective-C property attribute, rather than as an /// underscored type specifier. /// /// \param allowArrayTypes Whether to accept nullability specifiers on an /// array type (e.g., because it will decay to a pointer). /// /// \returns true if nullability cannot be applied, false otherwise. bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation nullabilityLoc, bool isContextSensitive, bool allowArrayTypes); /// \brief Stmt attributes - this routine is the top level dispatcher. 
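/// 
/// Statement-level attributes of the kind routed through this dispatcher 
/// appear in source as, for example (illustrative): 
/// \code 
///   [[clang::fallthrough]];   // an attribute attached to a null statement 
/// \endcode 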
StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. 
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when an atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks whether two methods' types match and /// returns true or false accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declared in an interface or /// protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a /// category match those implemented in its primary class and warns each time /// an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// \brief Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See the description of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// \brief - Returns instance or factory methods in the global method pool for /// the given selector. It checks the desired kind first; if none is found and /// the parameter checkTheOther is set, it then checks the other kind. If no /// such method or only one method is found, the function returns false; /// otherwise, it returns true. 
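/// 
/// A hedged usage sketch (illustrative; \c Sel stands for a Selector already 
/// in scope): 
/// \code 
///   SmallVector<ObjCMethodDecl *, 4> Methods; 
///   bool Multiple = CollectMultipleMethodsInGlobalPool( 
///       Sel, Methods, /*InstanceFirst=*/true, /*CheckTheOther=*/true); 
///   // 'Multiple' is true only when more than one candidate was collected. 
/// \endcode 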
bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// \brief - Returns a selector which best matches given argument list or /// nullptr if none could be found ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// \brief Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaraation for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? 
Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg(ActOnFinishFullExpr(Arg, CC).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// \brief A RAII object to enter scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S): S(S) { S.ActOnStartOfCompoundStmt(); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops function a function scope on exit. struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal, SourceLocation DotDotDotLoc, Expr *RHSVal, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); class ConditionResult; StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr, Stmt *InitStmt, ConditionResult Cond, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Stmt *InitStmt, ConditionResult Cond); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. 
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, bool AllowParamOrMoveConstructible); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, bool AllowParamOrMoveConstructible); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr 
*operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// \brief If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// \brief Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// \brief Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. 
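// Illustrative sketch (not part of the interface): a typical caller of the 
// decl-use checks declared below might guard an expression build as 
//   if (DiagnoseUseOfDecl(ND, Loc)) 
//     return ExprError(); 
// where 'ND' and 'Loc' are placeholders for the referenced declaration and 
// the location of its use. 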
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, bool IsDecltype = false); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void UpdateMarkingForLValueToRValue(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// \brief Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. 
/// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// \brief Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// \brief Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// \brief Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// \brief Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// \brief Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// \brief Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); // Primary Expressions. 
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, std::unique_ptr<CorrectionCandidateCallback> CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentType IT); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, 
SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
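// (Editorial note, not part of the original header.) A rough sketch of that
// re-invocation hook: the caller packs its parser-side state into the struct
// declared below and passes its address as the trailing ExtraArgs parameter of
// BuildMemberReferenceExpr; `S`, `Id`, `ObjCImpDecl`, `Base`, `OpLoc`, `SS`,
// `TemplateKWLoc`, `NameInfo`, and `TemplateArgs` are placeholder names:
//
//   ActOnMemberAccessExtraArgs ExtraArgs = {S, Id, ObjCImpDecl};
//   ExprResult Res = BuildMemberReferenceExpr(
//       Base, Base->getType(), OpLoc, /*IsArrow=*/false, SS, TemplateKWLoc,
//       /*FirstQualifierInScope=*/nullptr, NameInfo, TemplateArgs, S,
//       &ExtraArgs);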
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// \brief Build an altivec or OpenCL literal. 
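///
/// (Editorial note, not part of the original header.) For reference, the
/// source forms this is meant to handle look roughly like:
/// \code
///   vector int v = (vector int)(1, 2, 3, 4);         // AltiVec-style literal
///   float4     f = (float4)(1.0f, 2.0f, 3.0f, 4.0f); // OpenCL-style literal
/// \endcode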
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// \brief Describes the result of an "if-exists" condition check. enum IfExistsResult { /// \brief The symbol exists. IER_Exists, /// \brief The symbol does not exist. IER_DoesNotExist, /// \brief The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// \brief An error occurred. 
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, AttributeList *AttrList, UsingDirectiveDecl * &UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; /// \brief Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// \brief Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// \brief Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. 
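///
/// (Editorial note, not part of the original header.) Per [dcl.init.list]p2
/// this matches constructors such as:
/// \code
///   struct S {
///     S(std::initializer_list<int> il); // initializer-list constructor
///   };
/// \endcode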
bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, AttributeList *AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, AttributeList *AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// correspnding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, AttributeList *AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? 
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// \brief Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// \brief Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(ComputedEST != EST_ComputedNoexcept && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// \brief The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// \brief The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// \brief Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// \brief Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// \brief Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_ComputedNoexcept; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// \brief Determine what sort of exception specification a defaulted /// copy constructor of a class will have. 
ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// copy constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// \brief Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// \brief Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// \brief Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// \brief Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// \brief Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit destructor for the given class.
/// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// \brief Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor); /// \brief Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// \brief Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// \brief Determine whether the given function is an implicitly-deleted /// special member function. 
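///
/// (Editorial note, not part of the original header.) For example, the copy
/// constructor implicitly declared for
/// \code
///   struct Holder { std::unique_ptr<int> P; };
/// \endcode
/// is defined as deleted, and is the kind of member reported here.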
bool isImplicitlyDeleted(FunctionDecl *FD); /// \brief Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// \brief Whether this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// \brief Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// \brief Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. 
ExprResult ActOnCXXThis(SourceLocation loc); /// \brief Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// \brief When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// \brief RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// \brief Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// \brief Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// \brief Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). 
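///
/// (Editorial note, not part of the original header.) Concretely, each of
/// these source forms reaches this entry point:
/// \code
///   int(x);             // function-style cast
///   ClassType(x, y, z); // construction of a class temporary
///   int();              // value-initialized temporary
/// \endcode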
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, bool UseGlobal, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// \brief Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. 
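///
/// (Editorial note, not part of the original header; the trait spelling below
/// is an assumption.) Expression traits query an expression rather than a
/// type, e.g.:
/// \code
///   bool B = __is_lvalue_expr(*p);
/// \endcode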
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr) { return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc() : SourceLocation()); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue = false, bool IsConstexpr = false, bool IsLambdaInitCaptureInitializer = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// \brief The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// \brief The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// \brief Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// \brief The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// \brief The identifier preceding the '::'. IdentifierInfo *Identifier; /// \brief The location of the identifier. 
SourceLocation IdentifierLoc; /// \brief The location of the '::'. SourceLocation CCLoc; /// \brief Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// \brief The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// \brief The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. 
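///
/// (Editorial note, not part of the original header.) This is the form used
/// for a specifier such as the 'Outer<T>::' in:
/// \code
///   typename Outer<T>::type value;
/// \endcode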
bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// \brief Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// \brief Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// \brief Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// \brief Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, bool IsConstexprSpecified); /// \brief Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// \brief Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
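///
/// (Editorial note, not part of the original header.) Init-captures are the
/// C++14 forms such as:
/// \code
///   auto l = [v = compute(), &r = obj] { return v + r; };
/// \endcode
/// where 'v = compute()' is copy-initialized and '&r = obj' initializes a
/// reference (so no lvalue-to-rvalue conversion is applied to 'obj').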
ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// \brief Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// \brief Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// \brief Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// \brief Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// \brief Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::LambdaScopeInfo::Capture &From); /// \brief Diagnose if an explicit lambda capture is unused. void DiagnoseUnusedLambdaCapture(const sema::LambdaScopeInfo::Capture &From); /// \brief Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// \brief Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// \brief Define the "body" of the conversion from a lambda object to a /// block pointer. 
/// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = nullptr); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, 
MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. 
bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// \brief Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation 
EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
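  // Added source-level example (not from the original header) of what the
  // overriding-function checks above and below accept and reject:
  //
  //   struct Base {
  //     virtual Base *clone();
  //     virtual void f() final;
  //   };
  //   struct Derived : Base {
  //     Derived *clone() override;  // OK: covariant return type (C++ [class.virtual]p5)
  //     void f() override;          // error: overrides a function marked 'final'
  //   };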
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// \brief When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// \brief The context in which we are checking a template parameter list. 
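  // Added commentary (not in the original header): the contexts enumerated below
  // roughly correspond to source forms such as
  //
  //   template<typename T> struct X;           // TPC_ClassTemplate
  //   template<typename T> void f(T);          // TPC_FunctionTemplate
  //   template<typename T> friend struct Y;    // TPC_FriendClassTemplate
  //   template<typename T> using Vec = X<T>;   // TPC_TypeAliasTemplate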
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
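  // Added example (not from the original header) of an elaborated-type-specifier
  // that names a template-id, the construct handled by the next declaration:
  //
  //   template<typename T> struct Box {};
  //   struct Box<int> b;   // 'struct' keyword followed by the template-id 'Box<int>'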
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization(FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation 
TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// \brief Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// \brief The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// \brief The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// \brief The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// \brief Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. 
/// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. 
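  // Added example (not from the original header) of the kind of construct the
  // unexpanded-parameter-pack machinery below diagnoses:
  //
  //   template<typename ...Ts> void g(Ts...);
  //   template<typename ...Ts> void f(Ts ...args) {
  //     g(args...);   // pack is expanded: fine
  //     g(args);      // unexpanded parameter pack: ill-formed in most contexts
  //   }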
bool isUnexpandedParameterPackPermitted(); /// \brief The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// \brief An arbitrary expression. UPPC_Expression = 0, /// \brief The base type of a class type. UPPC_BaseType, /// \brief The type of an arbitrary declaration. UPPC_DeclarationType, /// \brief The type of a data member. UPPC_DataMemberType, /// \brief The size of a bit-field. UPPC_BitFieldWidth, /// \brief The expression in a static assertion. UPPC_StaticAssertExpression, /// \brief The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// \brief The enumerator value. UPPC_EnumeratorValue, /// \brief A using declaration. UPPC_UsingDeclaration, /// \brief A friend declaration. UPPC_FriendDeclaration, /// \brief A declaration qualifier. UPPC_DeclarationQualifier, /// \brief An initializer. UPPC_Initializer, /// \brief A default argument. UPPC_DefaultArgument, /// \brief The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// \brief The type of an exception. UPPC_ExceptionType, /// \brief Partial specialization. UPPC_PartialSpecialization, /// \brief Microsoft __if_exists. UPPC_IfExists, /// \brief Microsoft __if_not_exists. UPPC_IfNotExists, /// \brief Lambda expression. UPPC_Lambda, /// \brief Block expression, UPPC_Block }; /// \brief Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// \brief If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. 
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. 
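  // Added example (not from the original header): the "pattern" of a pack expansion
  // is the type or expression preceding the ellipsis:
  //
  //   template<typename ...Ts> struct Tuple {};
  //   template<typename ...Ts> using PtrTuple = Tuple<Ts *...>;   // pattern is 'Ts *'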
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. 
/// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// \brief Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// \brief Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// \brief Template argument deduction was successful. TDK_Success = 0, /// \brief The declaration was invalid; do nothing. TDK_Invalid, /// \brief Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// \brief Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// \brief Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// \brief Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// \brief Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// \brief After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. 
  TDK_DeducedMismatch,
  /// \brief After substituting deduced template arguments, an element of
  /// a dependent parameter type did not match the corresponding element
  /// of the corresponding argument (when deducing from an initializer list).
  TDK_DeducedMismatchNested,
  /// \brief A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,
  /// \brief When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,
  /// \brief When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,
  /// \brief The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,
  /// \brief Checking non-dependent argument conversions failed.
  TDK_NonDependentConversionFailure,
  /// \brief Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure,
  /// \brief CUDA Target attributes do not match.
  TDK_CUDATargetMismatch
};

TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// \brief A function argument from which we performed template argument
/// deduction for a call.
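  // Added commentary (not in the original header): two source-level situations and
  // the TemplateDeductionResult codes above that they roughly map to:
  //
  //   template<typename T> void f(T, T);
  //   // f(1, 2.0);  deduces T=int and T=double  -> TDK_Inconsistent
  //   // f(1);       too few call arguments      -> TDK_TooFewArguments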
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// \brief Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// \brief Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// \brief Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// \brief Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// \brief Declare implicit deduction guides for a class template if we've /// not already done so. 
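  // Added example (not from the original header) of C++17 class template argument
  // deduction, which relies on the implicitly declared deduction guides below:
  //
  //   template<typename T> struct Box { T value; Box(T v) : value(v) {} };
  //   Box b(42);   // deduced as Box<int> via the implicit guide Box(T) -> Box<T>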
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// \brief The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. 
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, } Kind; /// \brief Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// \brief The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// \brief The entity that is being synthesized. Decl *Entity; /// \brief The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// \brief The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// \brief The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// \brief The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// \brief The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// \brief The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// \brief Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// \brief List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. 
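  // Added example (not from the original header): the synthesis-context stack
  // described above is what drives the familiar chain of
  // "in instantiation of ... requested here" notes, e.g. for
  //
  //   template<typename T> struct A { typename T::type x; };  // fails for T = int
  //   template<typename T> struct B { A<T> a; };
  //   B<int> b;   // error in A<int>; notes point at 'A<T> a;' and then at 'B<int> b;'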
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// \brief Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// \brief Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// \brief Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instanting a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// \brief Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// \brief Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// \brief The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. // FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// \brief The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// \brief The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// \brief RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// \brief For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. 
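  // Illustrative sketch (not part of the original header) of how the RAII helper
  // above is typically driven when expanding a pack element by element; `S` (a
  // Sema &) and `NumExpansions` are assumed names for the example:
  //
  //   for (unsigned I = 0; I != NumExpansions; ++I) {
  //     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I);
  //     // ... substitute the pattern using the I'th element of each pack ...
  //   }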
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// \brief A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// \brief Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// \brief Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting prior template arguments into a /// non-type parameter. 
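  // Illustrative sketch (not part of the original header): callers construct an
  // InstantiatingTemplate on the stack and bail out if the instantiation depth was
  // exceeded; `PointOfInstantiation` and `Entity` are assumed names:
  //
  //   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
  //   if (Inst.isInvalid())
  //     return;   // too deep; an error has already been produced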
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// \brief Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// \brief Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// \brief Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// \brief Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// \brief RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. 
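  // Illustrative sketch (not part of the original header) of the trap described
  // above; the substitution in the middle is an assumed placeholder:
  //
  //   SFINAETrap Trap(*this);
  //   /* ...perform a substitution or check that may fail... */
  //   if (Trap.hasErrorOccurred())
  //     return TDK_SubstitutionFailure;   // report a deduction failure, not hard errors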
class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// \brief Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// \brief RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// \brief The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// \brief Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// \brief The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// \brief A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// \brief Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// \brief An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// \brief The queue of implicit template instantiations that are required /// but have not yet been performed. 
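  // Added example (not from the original header) of a use that lands in the queue
  // declared below: the definition of 'twice<int>' is required only implicitly, so
  // it is recorded and instantiated later rather than at the point of use.
  //
  //   template<typename T> T twice(T x) { return x + x; }
  //   int n = twice(3);   // queues an implicit instantiation of 'twice<int>'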
std::deque<PendingImplicitInstantiation> PendingInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// \brief The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations; class LocalEagerInstantiationScope { public: LocalEagerInstantiationScope(Sema &S) : S(S) { SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); } ~LocalEagerInstantiationScope() { assert(S.PendingLocalImplicitInstantiations.empty() && "there shouldn't be any pending local implicit instantiations"); SavedPendingLocalImplicitInstantiations.swap( S.PendingLocalImplicitInstantiations); } private: Sema &S; std::deque<PendingImplicitInstantiation> SavedPendingLocalImplicitInstantiations; }; /// A helper class for building up ExtParameterInfos. class ExtParameterInfoBuilder { SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos; bool HasInteresting = false; public: /// Set the ExtParameterInfo for the parameter at the given index, /// void set(unsigned index, FunctionProtoType::ExtParameterInfo info) { assert(Infos.size() <= index); Infos.resize(index); Infos.push_back(info); if (!HasInteresting) HasInteresting = (info != FunctionProtoType::ExtParameterInfo()); } /// Return a pointer (suitable for setting in an ExtProtoInfo) to the /// ExtParameterInfo array we've built up. 
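  /// Illustrative sketch of the builder's intended use when substituting a
  /// function prototype (hypothetical call site; it assumes
  /// FunctionProtoType::getExtParameterInfo and an ExtProtoInfo member named
  /// ExtParameterInfos, neither of which is declared in this header):
  /// \code
  ///   Sema::ExtParameterInfoBuilder InfoBuilder;
  ///   for (unsigned I = 0; I != NumParams; ++I)
  ///     InfoBuilder.set(I, OrigProto->getExtParameterInfo(I));
  ///   EPI.ExtParameterInfos = InfoBuilder.getPointerOrNull(NumParams);
  /// \endcode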
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
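  /// Illustrative sketch of a call to the declaration that follows
  /// (hypothetical call site; error handling shown only in outline):
  /// \code
  ///   SmallVector<Expr *, 8> ConvertedArgs;
  ///   if (SemaRef.SubstExprs(CallArgs, /*IsCall=*/true, TemplateArgs,
  ///                          ConvertedArgs))
  ///     return true; // substitution failed; diagnostics were handled
  ///                  // according to the current SFINAE context
  /// \endcode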
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( 
VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl * const *ProtoRefNames, unsigned NumProtoRefs, const 
SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList); Decl *ActOnStartClassImplementation( SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, AttributeList *attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. 
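  /// For illustration, these are the kinds of source-level types the
  /// following entry point produces (standard Foundation names are used
  /// purely as examples):
  /// \code
  ///   NSArray<NSString *> *specialized;   // with type arguments
  ///   id<NSCopying, NSObject> qualified;  // protocol-qualified object pointer
  /// \endcode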
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Check the application of the Objective-C '__kindof' qualifier to /// the given type. bool checkObjCKindOfType(QualType &type, SourceLocation loc); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. AttributeList *ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). 
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// \brief Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// \brief The message is sent to 'super'. ObjCSuperMessage, /// \brief The message is an instance message. ObjCInstanceMessage, /// \brief The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl 
*&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// \brief Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// \brief Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// \brief Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// \brief Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// \brief Called on well-formed \#pragma init_seg(). 
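  /// For illustration, the MSVC-style source form handled here is
  /// \code
  ///   #pragma init_seg(lib)
  /// \endcode
  /// which selects the initialization group used for subsequent dynamic
  /// initializers.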
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// \brief Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); /// \brief Called on well-formed '\#pragma clang attribute push'. void ActOnPragmaAttributePush(AttributeList &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); /// \brief Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc); /// \brief Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. 
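  /// For illustration, a region of the form
  /// \code
  ///   #pragma clang attribute push (__attribute__((annotate("grp"))), apply_to = function)
  ///   void f(); // receives annotate("grp")
  ///   #pragma clang attribute pop
  /// \endcode
  /// causes the pushed attribute to be applied to matching declarations.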
  void AddPragmaAttributes(Scope *S, Decl *D);

  void DiagnoseUnterminatedPragmaAttribute();

  /// \brief Called on well formed \#pragma clang optimize.
  void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

  /// \brief Get the location for the currently active "\#pragma clang optimize
  /// off". If this location is invalid, then the state of the pragma is "on".
  SourceLocation getOptimizeOffPragmaLocation() const {
    return OptimizeOffPragmaLocation;
  }

  /// \brief Only called on function definitions; if there is a pragma in scope
  /// with the effect of a range-based optnone, consider marking the function
  /// with attribute optnone.
  void AddRangeBasedOptnone(FunctionDecl *FD);

  /// \brief Adds the 'optnone' attribute to the function declaration if there
  /// are no conflicts; Loc represents the location causing the 'optnone'
  /// attribute to be added (usually because of a pragma).
  void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

  /// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
  void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                      unsigned SpellingListIndex, bool IsPackExpansion);
  void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                      unsigned SpellingListIndex, bool IsPackExpansion);

  /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
  /// declaration.
  void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                            unsigned SpellingListIndex);

  /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
  /// declaration.
  void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
                         unsigned SpellingListIndex);

  /// AddAlignValueAttr - Adds an align_value attribute to a particular
  /// declaration.
  void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                         unsigned SpellingListIndex);

  /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
  /// declaration.
  void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                           Expr *MinBlocks, unsigned SpellingListIndex);

  /// AddModeAttr - Adds a mode attribute to a particular declaration.
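  /// For illustration, the GNU-style source form handled here is
  /// \code
  ///   typedef int word_t __attribute__((mode(SI))); // 32-bit integer mode
  /// \endcode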
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name, unsigned SpellingListIndex, bool InInstantiation = false); void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI, unsigned SpellingListIndex); void AddNSConsumedAttr(SourceRange AttrRange, Decl *D, unsigned SpellingListIndex, bool isNSConsumed, bool isTemplateInstantiation); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// \brief Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// \brief Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// \brief Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// \brief Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// \brief Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// \brief Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. 
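  /// For illustration (using the standard cl_khr_fp64 extension purely as an
  /// example), a guarded type becomes usable only once its extension is
  /// enabled:
  /// \code
  ///   #pragma OPENCL EXTENSION cl_khr_fp64 : enable
  ///   double d; // 'double' would otherwise be diagnosed as disabled
  /// \endcode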
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Set to true inside '#pragma omp declare target' region. bool IsInOpenMPDeclareTargetContext = false; /// \brief Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// \brief Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool IsOpenMPCapturedByRef(ValueDecl *D, unsigned Level); /// \brief Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *IsOpenMPCapturedDecl(ValueDecl *D); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// \brief Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateDecl(ValueDecl *D, unsigned Level); /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, ValueDecl *D, unsigned Level); /// \brief Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(ValueDecl *D, unsigned Level); ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// \brief Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// \brief Start analysis of clauses. void StartOpenMPClause(OpenMPClauseKind K); /// \brief End analysis of clauses. 
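  /// Illustrative sketch of the bracketing the clause and data-sharing hooks
  /// expect (hypothetical call site; only entry points declared in this class
  /// are used, and error handling is omitted):
  /// \code
  ///   S.StartOpenMPDSABlock(OMPD_parallel, DirName, CurScope, Loc);
  ///   S.StartOpenMPClause(OMPC_private);
  ///   OMPClause *Priv =
  ///       S.ActOnOpenMPPrivateClause(Vars, StartLoc, LParenLoc, EndLoc);
  ///   S.EndOpenMPClause();
  ///   StmtResult Res =
  ///       S.ActOnOpenMPParallelDirective({Priv}, Body, StartLoc, EndLoc);
  ///   S.EndOpenMPDSABlock(Res.get());
  /// \endcode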
void EndOpenMPClause(); /// \brief Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// \brief Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// \brief Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// \brief Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl( SourceLocation Loc, ArrayRef<Expr *> VarList); /// \brief Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// \brief Called on start of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// \brief Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// \brief Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// \brief Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// \brief Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// \brief Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Called on the start of target region i.e. '#pragma omp declare target'. bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc); /// Called at the end of target region i.e. '#pragme omp end declare target'. void ActOnFinishOpenMPDeclareTargetDirective(); /// Called on correct id-expression from the '#pragma omp declare target'. void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OMPDeclareTargetDeclAttr::MapTypeTy MT, NamedDeclSetType &SameDirectiveDecls); /// Check declaration inside target region. void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D, SourceLocation IdLoc = SourceLocation()); /// Return true inside OpenMP declare target region. bool isInOpenMPDeclareTargetContext() const { return IsInOpenMPDeclareTargetContext; } /// Return true inside OpenMP target region. bool isInOpenMPTargetExecutionDirective() const; /// Return true if (un)supported features for the current target should be /// diagnosed if OpenMP (offloading) is enabled. bool shouldDiagnoseTargetSupportFromOpenMP() const { return !getLangOpts().OpenMPIsDevice || isInOpenMPDeclareTargetContext() || isInOpenMPTargetExecutionDirective(); } /// Return the number of captured regions created for an OpenMP directive. 
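  /// For illustration, a combined directive such as
  /// \code
  ///   #pragma omp target teams distribute parallel for
  ///   for (int i = 0; i < n; ++i) { /* ... */ }
  /// \endcode
  /// is handled with several nested captured regions, and the value returned
  /// here reflects how many a given directive kind requires.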
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind); /// \brief Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); /// \brief End of OpenMP region. /// /// \param S Statement associated with the current OpenMP region. /// \param Clauses List of clauses for the current OpenMP region. /// /// \returns Statement for finished OpenMP region. StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// \brief Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// \brief Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// \brief Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// \brief Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// \brief Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. 
StmtResult ActOnOpenMPTargetSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// \brief Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. 
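  /// For illustration, the source form handled here is
  /// \code
  ///   #pragma omp declare simd simdlen(8) uniform(scale) linear(i)
  ///   float axpy(float scale, float *x, int i);
  /// \endcode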
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// \brief Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'schedule' clause. 
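  /// For illustration, the source forms handled here include
  /// \code
  ///   #pragma omp for schedule(dynamic, 4)
  ///   #pragma omp for schedule(nonmonotonic: dynamic)
  /// \endcode
  /// where the optional modifiers map to \p M1 / \p M2 and the chunk
  /// expression to \p ChunkSize.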
OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'threads' clause. OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'simd' clause. OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'nogroup' clause. OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// \brief Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. 
OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// \brief Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(OpenMPMapClauseKind MapTypeModifier, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'defaultmap' clause. 
OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'to' clause. OMPClause *ActOnOpenMPToClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief The kind of conversion being performed. enum CheckedConversionKind { /// \brief An implicit conversion. CCK_ImplicitConversion, /// \brief A C-style cast. CCK_CStyleCast, /// \brief A functional-style cast. CCK_FunctionalCast, /// \brief A cast other than a C-style cast. CCK_OtherCast }; /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. 
  ExprResult TemporaryMaterializationConversion(Expr *E);

  // Used for emitting the right warning by DefaultVariadicArgumentPromotion
  enum VariadicCallType {
    VariadicFunction,
    VariadicBlock,
    VariadicMethod,
    VariadicConstructor,
    VariadicDoesNotApply
  };

  VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                       const FunctionProtoType *Proto,
                                       Expr *Fn);

  // Used for determining in which context a type is allowed to be passed to
  // a vararg function.
  enum VarArgKind {
    VAK_Valid,
    VAK_ValidInCXX11,
    VAK_Undefined,
    VAK_MSVCUndefined,
    VAK_Invalid
  };

  // Determines which VarArgKind fits an expression.
  VarArgKind isValidVarArgType(const QualType &Ty);

  /// Check to see if the given expression is a valid argument to a variadic
  /// function, issuing a diagnostic if not.
  void checkVariadicArgument(const Expr *E, VariadicCallType CT);

  /// Check to see if a given expression could have '.c_str()' called on it.
  bool hasCStrMethod(const Expr *E);

  /// GatherArgumentsForCall - Collect argument expressions for various
  /// forms of call prototypes.
  bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                              const FunctionProtoType *Proto,
                              unsigned FirstParam, ArrayRef<Expr *> Args,
                              SmallVectorImpl<Expr *> &AllArgs,
                              VariadicCallType CallType = VariadicDoesNotApply,
                              bool AllowExplicit = false,
                              bool IsListInitialization = false);

  // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
  // will create a runtime trap if the resulting type is not a POD type.
  ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                              FunctionDecl *FDecl);

  // UsualArithmeticConversions - performs the UsualUnaryConversions on its
  // operands and then handles various conversions that are common to binary
  // operators (C99 6.3.1.8). If both operands aren't arithmetic, this
  // routine returns the first non-arithmetic type found. The client is
  // responsible for emitting appropriate error diagnostics.
  QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                      bool IsCompAssign = false);

  /// AssignConvertType - All of the 'assignment' semantic checks return this
  /// enum to indicate whether the assignment was allowed. These checks are
  /// done for simple assignments, as well as initialization, return from
  /// function, argument passing, etc. The query is phrased in terms of a
  /// source and destination type.
  enum AssignConvertType {
    /// Compatible - the types are compatible according to the standard.
    Compatible,
    /// PointerToInt - The assignment converts a pointer to an int, which we
    /// accept as an extension.
    PointerToInt,
    /// IntToPointer - The assignment converts an int to a pointer, which we
    /// accept as an extension.
    IntToPointer,
    /// FunctionVoidPointer - The assignment is between a function pointer and
    /// void*, which the standard doesn't allow, but we accept as an extension.
    FunctionVoidPointer,
    /// IncompatiblePointer - The assignment is between two pointer types that
    /// are not compatible, but we accept them as an extension.
    IncompatiblePointer,
    /// IncompatiblePointerSign - The assignment is between two pointer types
    /// which point to integers which have a different sign, but are otherwise
    /// identical. This is a subset of the above, but broken out because it's
    /// by far the most common case of incompatible pointers.
    IncompatiblePointerSign,
    /// CompatiblePointerDiscardsQualifiers - The assignment discards
    /// c/v/r qualifiers, which we accept as an extension.
    CompatiblePointerDiscardsQualifiers,
    /// IncompatiblePointerDiscardsQualifiers - The assignment
    /// discards qualifiers that we don't permit to be discarded,
    /// like address spaces.
    IncompatiblePointerDiscardsQualifiers,
    /// IncompatibleNestedPointerQualifiers - The assignment is between two
    /// nested pointer types, and the qualifiers other than the first two
    /// levels differ e.g. char ** -> const char **, but we accept them as an
    /// extension.
    IncompatibleNestedPointerQualifiers,
    /// IncompatibleVectors - The assignment is between two vector types that
    /// have the same size, which we accept as an extension.
    IncompatibleVectors,
    /// IntToBlockPointer - The assignment converts an int to a block
    /// pointer. We disallow this.
    IntToBlockPointer,
    /// IncompatibleBlockPointer - The assignment is between two block
    /// pointer types that are not compatible.
    IncompatibleBlockPointer,
    /// IncompatibleObjCQualifiedId - The assignment is between a qualified
    /// id type and something else (that is incompatible with it). For example,
    /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
    IncompatibleObjCQualifiedId,
    /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
    /// object with __weak qualifier.
    IncompatibleObjCWeakRef,
    /// Incompatible - We reject this conversion outright, it is invalid to
    /// represent it in the AST.
    Incompatible
  };

  /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
  /// assignment conversion type specified by ConvTy. This returns true if the
  /// conversion was invalid or false if the conversion was accepted.
  bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc,
                                QualType DstType, QualType SrcType,
                                Expr *SrcExpr, AssignmentAction Action,
                                bool *Complained = nullptr);

  /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
  /// enum. If AllowMask is true, then we also allow the complement of a valid
  /// value, to be used as a mask.
  bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                         bool AllowMask) const;

  /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
  /// integer not in the range of enum values.
  void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                              Expr *SrcExpr);

  /// CheckAssignmentConstraints - Perform type checking for assignment,
  /// argument passing, variable initialization, and function return values.
  /// C99 6.5.16.
  AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                               QualType LHSType,
                                               QualType RHSType);

  /// Check assignment constraints and optionally prepare for a conversion of
  /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
  /// is true.
  AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                               ExprResult &RHS, CastKind &Kind,
                                               bool ConvertRHS = true);

  /// Check assignment constraints for an assignment of RHS to LHSType.
  ///
  /// \param LHSType The destination type for the assignment.
  /// \param RHS The source expression for the assignment.
  /// \param Diagnose If \c true, diagnostics may be produced when checking
  /// for assignability. If a diagnostic is produced, \p RHS will be
  /// set to ExprError(). Note that this function may still return
  /// without producing a diagnostic, even for an invalid assignment.
  /// \param DiagnoseCFAudited If \c true, the target is a function parameter
  /// in an audited Core Foundation API and does not need to be checked
  /// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // \brief If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool isRelational); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool isRelational); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// \brief Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// \brief Type-check an expression that's being passed to an /// __unknown_anytype parameter. 
  ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result,
                                QualType &paramType);

  // CheckVectorCast - check type constraints for vectors.
  // Since vectors are an extension, there is no C standard reference for this.
  // We allow casting between vectors and integer datatypes of the same size.
  // returns true if the cast is invalid
  bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                       CastKind &Kind);

  /// \brief Prepare `SplattedExpr` for a vector splat operation, adding
  /// implicit casts if necessary.
  ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);

  // CheckExtVectorCast - check type constraints for extended vectors.
  // Since vectors are an extension, there is no C standard reference for this.
  // We allow casting between vectors and integer datatypes of the same size,
  // or vectors and the element type of that vector.
  // returns the cast expr
  ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                                CastKind &Kind);

  ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
                                        SourceLocation LParenLoc,
                                        Expr *CastExpr,
                                        SourceLocation RParenLoc);

  enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };

  /// \brief Checks for invalid conversions and casts between
  /// retainable pointers and other pointer kinds for ARC and Weak.
  ARCConversionResult CheckObjCConversion(SourceRange castRange,
                                          QualType castType, Expr *&op,
                                          CheckedConversionKind CCK,
                                          bool Diagnose = true,
                                          bool DiagnoseCFAudited = false,
                                          BinaryOperatorKind Opc = BO_PtrMemD);

  Expr *stripARCUnbridgedCast(Expr *e);
  void diagnoseARCUnbridgedCast(Expr *e);

  bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                             QualType ExprType);

  /// checkRetainCycles - Check whether an Objective-C message send
  /// might create an obvious retain cycle.
  void checkRetainCycles(ObjCMessageExpr *msg);
  void checkRetainCycles(Expr *receiver, Expr *argument);
  void checkRetainCycles(VarDecl *Var, Expr *Init);

  /// checkUnsafeAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained type.
  bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);

  /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
  /// to weak/__unsafe_unretained expression.
  void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);

  /// CheckMessageArgumentTypes - Check types in an Obj-C message send.
  /// \param Method - May be null.
  /// \param [out] ReturnType - The return type of the send.
  /// \return true iff there were any incompatible types.
  bool CheckMessageArgumentTypes(QualType ReceiverType, MultiExprArg Args,
                                 Selector Sel,
                                 ArrayRef<SourceLocation> SelectorLocs,
                                 ObjCMethodDecl *Method, bool isClassMessage,
                                 bool isSuperMessage, SourceLocation lbrac,
                                 SourceLocation rbrac, SourceRange RecRange,
                                 QualType &ReturnType, ExprValueKind &VK);

  /// \brief Determine the result of a message send expression based on
  /// the type of the receiver, the method expected to receive the message,
  /// and the form of the message send.
  QualType getMessageSendResultType(QualType ReceiverType,
                                    ObjCMethodDecl *Method,
                                    bool isClassMessage, bool isSuperMessage);

  /// \brief If the given expression involves a message send to a method
  /// with a related result type, emit a note describing what happened.
  void EmitRelatedResultTypeNote(const Expr *E);

  /// \brief Given that we had incompatible pointer types in a return
  /// statement, check whether we're in a method with a related result
  /// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// \brief Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// \brief Abstract base class used for diagnosing integer constant /// expression violations. 
  class VerifyICEDiagnoser {
  public:
    bool Suppress;

    VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

    virtual void diagnoseNotICE(Sema &S, SourceLocation Loc,
                                SourceRange SR) = 0;
    virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
    virtual ~VerifyICEDiagnoser() { }
  };

  /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
  /// and reports the appropriate diagnostics. Returns false on success.
  /// Can optionally return the value of the expression.
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             VerifyICEDiagnoser &Diagnoser,
                                             bool AllowFold = true);
  ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                             unsigned DiagID,
                                             bool AllowFold = true);
  ExprResult VerifyIntegerConstantExpression(Expr *E,
                                             llvm::APSInt *Result = nullptr);

  /// VerifyBitField - verifies that a bit field expression is an ICE and has
  /// the correct width, and that the field type is valid.
  /// Returns false on success.
  /// Can optionally return whether the bit-field is of width 0
  ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                            QualType FieldTy, bool IsMsStruct, Expr *BitWidth,
                            bool *ZeroWidth = nullptr);

private:
  unsigned ForceCUDAHostDeviceDepth = 0;

public:
  /// Increments our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. So long as this count is greater
  /// than zero, all functions encountered will be __host__ __device__.
  void PushForceCUDAHostDevice();

  /// Decrements our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. Returns false if the count is 0
  /// before decrementing, so you can emit an error.
  bool PopForceCUDAHostDevice();

  /// Diagnostics that are emitted only if we discover that the given function
  /// must be codegen'ed. Because handling these correctly adds overhead to
  /// compilation, this is currently only enabled for CUDA compilations.
  llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
                 std::vector<PartialDiagnosticAt>>
      CUDADeferredDiags;

  /// A pair of a canonical FunctionDecl and a SourceLocation. When used as
  /// the key in a hashtable, both the FD and location are hashed.
  struct FunctionDeclAndLoc {
    CanonicalDeclPtr<FunctionDecl> FD;
    SourceLocation Loc;
  };

  /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
  /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
  /// same deferred diag twice.
  llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

  /// An inverse call graph, mapping known-emitted functions to one of their
  /// known-emitted callers (plus the location of the call).
  ///
  /// Functions that we can tell a priori must be emitted aren't added to this
  /// map.
  llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
                 /* Caller = */ FunctionDeclAndLoc>
      CUDAKnownEmittedFns;

  /// A partial call graph maintained during CUDA compilation to support
  /// deferred diagnostics.
  ///
  /// Functions are only added here if, at the time they're considered, they
  /// are not known-emitted. As soon as we discover that a function is
  /// known-emitted, we remove it and everything it transitively calls from
  /// this set and add those functions to CUDAKnownEmittedFns.
  llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
                 /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
                                                 SourceLocation>>
      CUDACallGraph;

  /// Diagnostic builder for CUDA errors which may or may not be deferred.
  ///
  /// In CUDA, there exist constructs (e.g.
variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class CUDADiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; CUDADiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); ~CUDADiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (CUDADiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a CUDADiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const CUDADiagBuilder &operator<<(const CUDADiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiag.hasValue()) *Diag.PartialDiag << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<PartialDiagnostic> PartialDiag; }; /// Creates a CUDADiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately. /// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. CUDADiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a CUDADiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. CUDADiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. 
/// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const AttributeList *Attr); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. 
/// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// \name Code completion //@{ /// \brief Describes the context in which code completion occurs. enum ParserCompletionContext { /// \brief Code completion occurs at top-level or namespace context. PCC_Namespace, /// \brief Code completion occurs within a class, struct, or union. PCC_Class, /// \brief Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// \brief Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// \brief Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// \brief Code completion occurs following one or more template /// headers. PCC_Template, /// \brief Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// \brief Code completion occurs within an expression. PCC_Expression, /// \brief Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// \brief Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// \brief Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// \brief Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// \brief Code completion occurs where only a type is permitted. PCC_Type, /// \brief Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// \brief Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. 
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args); void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void 
CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); 
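  /// Illustrative sketch (an assumption added for documentation purposes, not
  /// a declaration from this interface): a per-target hook such as
  /// CheckARMBuiltinFunctionCall typically validates an immediate operand of
  /// a builtin call with one of the constant-argument helpers declared below,
  /// roughly:
  /// \code
  ///   // Hypothetical builtin and bounds, for illustration only: require the
  ///   // second argument to be an integer constant in the range [1, 32].
  ///   return SemaBuiltinConstantArgRange(TheCall, /*ArgNum=*/1, /*Low=*/1,
  ///                                      /*High=*/32);
  /// \endcode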
public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS); void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// \brief Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD); /// \brief Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. 
  void CheckBreakContinueBinding(Expr *E);

  /// \brief Check whether the receiver is a mutable ObjC container which
  /// attempts to add itself into the container.
  void CheckObjCCircularContainer(ObjCMessageExpr *Message);

  void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
  void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                                 bool DeleteWasArrayForm);

public:
  /// \brief Register a magic integral constant to be used as a type tag.
  void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                  uint64_t MagicValue, QualType Type,
                                  bool LayoutCompatible, bool MustBeNull);

  struct TypeTagData {
    TypeTagData() {}

    TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
        : Type(Type), LayoutCompatible(LayoutCompatible),
          MustBeNull(MustBeNull) {}

    QualType Type;

    /// If true, \c Type should be compared with other expression's types for
    /// layout-compatibility.
    unsigned LayoutCompatible : 1;
    unsigned MustBeNull : 1;
  };

  /// A pair of ArgumentKind identifier and magic value. This uniquely
  /// identifies the magic value.
  typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
  /// \brief A map from magic value to type information.
  std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
      TypeTagForDatatypeMagicValues;

  /// \brief Perform checks on a call of a function with argument_with_type_tag
  /// or pointer_with_type_tag attributes.
  void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                const ArrayRef<const Expr *> ExprArgs,
                                SourceLocation CallSiteLoc);

  /// \brief Check if we are taking the address of a packed field
  /// as this may be a problem if the pointer value is dereferenced.
  void CheckAddressOfPackedMember(Expr *rhs);

  /// \brief The parser's current scope.
  ///
  /// The parser maintains this state here.
  Scope *CurScope;

  mutable IdentifierInfo *Ident_super;
  mutable IdentifierInfo *Ident___float128;

  /// Nullability type specifiers.
  IdentifierInfo *Ident__Nonnull = nullptr;
  IdentifierInfo *Ident__Nullable = nullptr;
  IdentifierInfo *Ident__Null_unspecified = nullptr;

  IdentifierInfo *Ident_NSError = nullptr;

  /// \brief The handler for the FileChanged preprocessor events.
  ///
  /// Used for diagnostics that implement custom semantic analysis for #include
  /// directives, like -Wpragma-pack.
  sema::SemaPPCallbacks *SemaPPCallbackHandler;

protected:
  friend class Parser;
  friend class InitializationSequence;
  friend class ASTReader;
  friend class ASTDeclReader;
  friend class ASTWriter;

public:
  /// Retrieve the keyword associated with the given nullability kind.
  IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

  /// The struct behind the CFErrorRef pointer.
  RecordDecl *CFError = nullptr;

  /// Retrieve the identifier "NSError".
  IdentifierInfo *getNSErrorIdent();

  /// \brief Retrieve the parser's current scope.
  ///
  /// This routine must only be used when it is certain that semantic analysis
  /// and the parser are in precisely the same context, which is not the case
  /// when, e.g., we are performing any kind of template instantiation.
  /// Therefore, the only safe places to use this scope are in the parser
  /// itself and in routines directly invoked from the parser and *never* from
  /// template substitution or instantiation.
  Scope *getCurScope() const { return CurScope; }

  void incrementMSManglingNumber() const {
    return CurScope->incrementMSManglingNumber();
  }

  IdentifierInfo *getSuperIdentifier() const;
  IdentifierInfo *getFloat128Identifier() const;

  Decl *getObjCDeclContext() const;

  DeclContext *getCurLexicalContext() const {
    return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
  }

  const DeclContext *getCurObjCLexicalContext() const {
    const DeclContext *DC = getCurLexicalContext();
    // A category implicitly has the attribute of the interface.
    if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
      DC = CatD->getClassInterface();
    return DC;
  }

  /// \brief To be used for checking whether the arguments being passed to a
  /// function exceed the number of parameters expected for it.
  static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                               bool PartialOverloading = false) {
    // We check whether we're just after a comma in code-completion.
    if (NumArgs > 0 && PartialOverloading)
      return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
    return NumArgs > NumParams;
  }

  // Emitting members of dllexported classes is delayed until the class
  // (including field initializers) is fully parsed.
  SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;

private:
  class SavePendingParsedClassStateRAII {
  public:
    SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

    ~SavePendingParsedClassStateRAII() {
      assert(S.DelayedExceptionSpecChecks.empty() &&
             "there shouldn't be any pending delayed exception spec checks");
      assert(S.DelayedDefaultedMemberExceptionSpecs.empty() &&
             "there shouldn't be any pending delayed defaulted member "
             "exception specs");
      assert(S.DelayedDllExportClasses.empty() &&
             "there shouldn't be any pending delayed DLL export classes");
      swapSavedState();
    }

  private:
    Sema &S;
    decltype(DelayedExceptionSpecChecks) SavedExceptionSpecChecks;
    decltype(DelayedDefaultedMemberExceptionSpecs)
        SavedDefaultedMemberExceptionSpecs;
    decltype(DelayedDllExportClasses) SavedDllExportClasses;

    void swapSavedState() {
      SavedExceptionSpecChecks.swap(S.DelayedExceptionSpecChecks);
      SavedDefaultedMemberExceptionSpecs.swap(
          S.DelayedDefaultedMemberExceptionSpecs);
      SavedDllExportClasses.swap(S.DelayedDllExportClasses);
    }
  };

  /// \brief Helper class that collects misaligned member designations and
  /// their location info for delayed diagnostics.
  struct MisalignedMember {
    Expr *E;
    RecordDecl *RD;
    ValueDecl *MD;
    CharUnits Alignment;

    MisalignedMember() : E(), RD(), MD(), Alignment() {}
    MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                     CharUnits Alignment)
        : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
    explicit MisalignedMember(Expr *E)
        : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

    bool operator==(const MisalignedMember &m) { return this->E == m.E; }
  };

  /// \brief Small set of gathered accesses to potentially misaligned members
  /// due to the packed attribute.
  SmallVector<MisalignedMember, 4> MisalignedMembers;

  /// \brief Adds an expression to the set of gathered misaligned members.
  void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                     CharUnits Alignment);

public:
  /// \brief Diagnoses the current set of gathered accesses. This typically
  /// happens at full expression level. The set is cleared after emitting the
  /// diagnostics.
  void DiagnoseMisalignedMembers();

  /// \brief This function checks if the expression is in the set of
  /// potentially misaligned members and it is converted to some pointer type
  /// T with lower or equal alignment requirements. If so it removes it. This
  /// is used when we do not want to diagnose such misaligned access (e.g. in
  /// conversions to void*).
  void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); }; /// \brief RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, IsDecltype); } EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, bool IsDecltype = false) : Actions(Actions) { Actions.PushExpressionEvaluationContext(NewContext, Sema::ReuseLambdaContextDecl, IsDecltype); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList, nullptr, false); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// \brief Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// \brief The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
rmse.c
/*************************************************************************/
/**   File:         rmse.c                                              **/
/**   Description:  calculate root mean squared error of particular     **/
/**                 clustering.                                         **/
/**   Author:  Sang-Ha  Lee                                             **/
/**            University of Virginia.                                  **/
/**                                                                     **/
/**   Note: euclid_dist_2() and find_nearest_point() adopted from       **/
/**         Minebench code.                                             **/
/**                                                                     **/
/*************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <math.h>
//#include <omp.h>
#include "kmeans.h"

extern double wtime(void);

/*----< euclid_dist_2() >----------------------------------------------------*/
/* multi-dimensional spatial Euclid distance square */
__inline float euclid_dist_2(float *pt1,
                             float *pt2,
                             int numdims)
{
    int i;
    float ans = 0.0;

    for (i = 0; i < numdims; i++)
        ans += (pt1[i] - pt2[i]) * (pt1[i] - pt2[i]);

    return (ans);
}

/*----< find_nearest_point() >-----------------------------------------------*/
__inline int find_nearest_point(float *pt,    /* [nfeatures] */
                                int nfeatures,
                                float **pts,  /* [npts][nfeatures] */
                                int npts)
{
    int index = 0, i;
    float min_dist = FLT_MAX;

    /* find the cluster center id with min distance to pt */
    for (i = 0; i < npts; i++) {
        float dist;
        dist = euclid_dist_2(pt, pts[i], nfeatures); /* no need square root */
        if (dist < min_dist) {
            min_dist = dist;
            index = i;
        }
    }
    return (index);
}

/*----< rms_err(): calculates RMSE of clustering >----------------------------*/
float rms_err(float **feature,         /* [npoints][nfeatures] */
              int nfeatures,
              int npoints,
              float **cluster_centres, /* [nclusters][nfeatures] */
              int nclusters)
{
    int i;
    int nearest_cluster_index; /* cluster center id with min distance to pt */
    float sum_euclid = 0.0;    /* sum of Euclidean distance squares */
    float ret;                 /* return value */

    /* calculate and sum the square of euclidean distance */
#pragma omp parallel for \
    shared(feature, cluster_centres) \
    firstprivate(npoints, nfeatures, nclusters) \
    private(i, nearest_cluster_index) \
    reduction(+ : sum_euclid) \
    schedule(static)
    for (i = 0; i < npoints; i++) {
        nearest_cluster_index = find_nearest_point(feature[i],
                                                   nfeatures,
                                                   cluster_centres,
                                                   nclusters);

        sum_euclid += euclid_dist_2(feature[i],
                                    cluster_centres[nearest_cluster_index],
                                    nfeatures);
    }
    /* divide by n, then take sqrt */
    ret = sqrt(sum_euclid / npoints);

    return (ret);
}
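A minimal, self-contained worked example of the quantity rms_err() computes, sqrt of the mean squared distance from each point to its nearest cluster centre. The data, sizes, and the function name rmse_sketch_main are made up for illustration; it does not depend on kmeans.h.

#include <math.h>
#include <stdio.h>

int rmse_sketch_main(void)
{
    /* 3 points and 2 cluster centres in 2-D */
    const float pts[3][2]     = {{0.f, 0.f}, {1.f, 0.f}, {4.f, 0.f}};
    const float centres[2][2] = {{0.f, 0.f}, {4.f, 1.f}};
    float sum = 0.f;

    for (int i = 0; i < 3; i++) {
        float best = INFINITY;
        for (int c = 0; c < 2; c++) {
            float dx = pts[i][0] - centres[c][0];
            float dy = pts[i][1] - centres[c][1];
            float d2 = dx * dx + dy * dy;   /* squared distance, no sqrt needed */
            if (d2 < best) best = d2;
        }
        sum += best;                        /* squared distance to nearest centre */
    }
    /* nearest-centre squared distances are 0, 1, 1 -> RMSE = sqrt(2/3) ~ 0.816 */
    printf("rmse = %f\n", sqrtf(sum / 3.0f));
    return 0;
}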
bml_setters_ellpack_typed.c
#include "../../macros.h" #include "../../typed.h" #include "../bml_introspection.h" #include "../bml_types.h" #include "bml_setters_ellpack.h" #include "bml_types_ellpack.h" #include <complex.h> #include <math.h> #include <stdio.h> #include <stdlib.h> /** Set element i,j asuming there's no resetting of any element of A. * * \ingroup setters * * \param A The matrix which takes row i * \param i The column index * \param j The row index * \param value The element to be added * \WARNING sets an element from scratch * \todo set element new. * * */ void TYPED_FUNC( bml_set_element_new_ellpack) ( bml_matrix_ellpack_t * A, int i, int j, void *element) { int A_N = A->N; int A_M = A->M; int l; int ll; REAL_T *A_value = (REAL_T *) A->value; int *A_index = A->index; int *A_nnz = A->nnz; #ifdef USE_OMP_OFFLOAD //#pragma omp target #pragma omp target update from(A_nnz[:A_N], A_index[:A_N*A_M], A_value[:A_N*A_M]) #endif A_value[ROWMAJOR(i, A_nnz[i], A_N, A_M)] = *((REAL_T *) element); A_index[ROWMAJOR(i, A_nnz[i], A_N, A_M)] = j; A_nnz[i]++; #ifdef USE_OMP_OFFLOAD #pragma omp target update to(A_nnz[:A_N], A_index[:A_N*A_M], A_value[:A_N*A_M]) #endif } /** Set element i,j of matrix A. * * \ingroup setters * * \param A The matrix which takes row i * \param i The column index * \param j The row index * \param value The element to be set * \WARNING sets an element from scratch * \todo set element new. * * */ void TYPED_FUNC( bml_set_element_ellpack) ( bml_matrix_ellpack_t * A, int i, int j, void *element) { int A_N = A->N; int A_M = A->M; int l; int ll; REAL_T *A_value = (REAL_T *) A->value; int *A_index = A->index; int *A_nnz = A->nnz; #ifdef USE_OMP_OFFLOAD #pragma omp target update from(A_nnz[:A_N], A_index[:A_N*A_M], A_value[:A_N*A_M]) #endif ll = 0; if (A_nnz[i] > 2) { for (int l = 0; l < A_nnz[i]; l++) { if (A_index[ROWMAJOR(i, l, A_N, A_M)] == j) //Something in the row at the same position { A_value[ROWMAJOR(i, l, A_N, A_M)] = *((REAL_T *) element); ll = 1; break; } } if (ll == 0) //There is something in the row but in a different position { A_value[ROWMAJOR(i, A_nnz[i], A_N, A_M)] = *((REAL_T *) element); A_index[ROWMAJOR(i, A_nnz[i], A_N, A_M)] = j; A_nnz[i]++; } } else //There is nothing in the row { A_value[ROWMAJOR(i, A_nnz[i], A_N, A_M)] = *((REAL_T *) element); A_index[ROWMAJOR(i, A_nnz[i], A_N, A_M)] = j; A_nnz[i]++; } #ifdef USE_OMP_OFFLOAD #pragma omp target update to(A_nnz[:A_N], A_index[:A_N*A_M], A_value[:A_N*A_M]) #endif } /** Set row i of matrix A. * * \ingroup setters * * \param A The matrix which takes row i * \param i The index of the row to be set * \param row The row to be set * \param threshold The threshold value to be set * */ void TYPED_FUNC( bml_set_row_ellpack) ( bml_matrix_ellpack_t * A, int i, void *_row, double threshold) { REAL_T *row = _row; int A_N = A->N; int A_M = A->M; int ll = -1; REAL_T *A_value = (REAL_T *) A->value; int *A_index = A->index; int *A_nnz = A->nnz; #ifdef USE_OMP_OFFLOAD #pragma omp target map(to:row[0:A_N]) #endif { // begin offload region for (int j = 0; j < A_N; j++) { if (ABS(row[j]) > threshold) { ll++; A_value[ROWMAJOR(i, ll, A_N, A_M)] = row[j]; A_index[ROWMAJOR(i, ll, A_N, A_M)] = j; } } A_nnz[i] = ll + 1; } // end offload region } /** Set diagonal of matrix A. 
* * \ingroup setters * * \param A The matrix which takes diag * \param diag The diagonal to be set * \param threshold The threshold value to be used */ void TYPED_FUNC( bml_set_diagonal_ellpack) ( bml_matrix_ellpack_t * A, void *_diagonal, double threshold) { REAL_T *diagonal = _diagonal; int A_N = A->N; int A_M = A->M; REAL_T *A_value = (REAL_T *) A->value; int *A_index = A->index; int *A_nnz = A->nnz; int ll = 0; #ifdef USE_OMP_OFFLOAD #pragma omp target parallel for map(to:diagonal[:A_N]) #endif for (int i = 0; i < A_N; i++) { ll = 0; for (int j = 0; j < A_nnz[i]; j++) { if (A_index[ROWMAJOR(i, j, A_N, A_M)] == i) { if (ABS(diagonal[i]) > threshold) { A_value[ROWMAJOR(i, j, A_N, A_M)] = diagonal[i]; } else { A_value[ROWMAJOR(i, j, A_N, A_M)] = 0.0; } ll = 1; } } /* If there is no diagonal elements then */ if (ll == 0) { if (ABS(diagonal[i]) > threshold) { A_index[ROWMAJOR(i, A_nnz[i], A_N, A_M)] = i; A_value[ROWMAJOR(i, A_nnz[i], A_N, A_M)] = diagonal[i]; A_nnz[i]++; } } } }
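The setters above all address storage through ROWMAJOR(i, l, N, M). Below is a small sketch of the ELLPACK layout they assume (M fixed slots per row for values and column indices, plus a per-row nonzero counter), using a local ELL_ROWMAJOR macro that is assumed to match bml's row-major convention i*M + l; the names and sizes are illustrative only.

#include <stdio.h>

#define ELL_ROWMAJOR(i, l, N, M) ((i) * (M) + (l))  /* assumed i*M + l layout */

int ellpack_sketch_main(void)
{
    enum { N = 3, M = 4 };          /* 3 rows, at most 4 stored entries per row */
    double value[N * M] = {0};      /* packed nonzero values                    */
    int    index[N * M] = {0};      /* column index of each stored value        */
    int    nnz[N]       = {0};      /* number of stored entries per row         */

    /* append A(1,2) = 5.0 the way bml_set_element_new_ellpack does */
    int i = 1, j = 2;
    value[ELL_ROWMAJOR(i, nnz[i], N, M)] = 5.0;
    index[ELL_ROWMAJOR(i, nnz[i], N, M)] = j;
    nnz[i]++;

    /* read the row back */
    for (int l = 0; l < nnz[i]; l++)
        printf("A(%d,%d) = %g\n", i, index[ELL_ROWMAJOR(i, l, N, M)],
               value[ELL_ROWMAJOR(i, l, N, M)]);
    return 0;
}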
GB_unop__frexpe_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__frexpe_fp32_fp32) // op(A') function: GB (_unop_tran__frexpe_fp32_fp32) // C type: float // A type: float // cast: float cij = aij // unaryop: cij = GB_frexpef (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_frexpef (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = GB_frexpef (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FREXPE || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__frexpe_fp32_fp32) ( float *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = GB_frexpef (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = aij ; Cx [p] = GB_frexpef (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__frexpe_fp32_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
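GB_frexpef is internal to GraphBLAS; the sketch below only illustrates the operation this kernel is named after, extracting the binary exponent e of x = m * 2^e (0.5 <= |m| < 1) via the standard frexpf, under the assumption that this matches GB_frexpef's behaviour. The function names are illustrative.

#include <math.h>
#include <stdio.h>

/* Return the binary exponent from x = m * 2^e, discarding the mantissa. */
static float frexpe_sketch(float x)
{
    int e = 0;
    (void) frexpf(x, &e);   /* frexpf stores the exponent in e */
    return (float) e;
}

int frexpe_sketch_main(void)
{
    printf("%g %g %g\n",
           frexpe_sketch(8.0f),    /* 8    = 0.5  * 2^4 -> 4 */
           frexpe_sketch(0.75f),   /* 0.75 = 0.75 * 2^0 -> 0 */
           frexpe_sketch(1.0f));   /* 1    = 0.5  * 2^1 -> 1 */
    return 0;
}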
task_unitied_thread_threadid.c
// RUN: %libomp-compile-and-run // REQUIRES: abt // Compilation error after clang11+ // UNSUPPORTED: clang #include "omp_testsuite.h" #include <string.h> #include <stdio.h> int test_task_untied_thread_threadid(int num_threads) { int vals[num_threads]; memset(vals, 0, sizeof(int) * num_threads); omp_set_max_active_levels(2); #pragma omp parallel num_threads(num_threads / 2 + 1) #pragma omp master { int i; for (i = 0; i < num_threads; i++) { #pragma omp task firstprivate(i) untied { ABT_thread abt_thread; ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread)); int local_vals[num_threads]; memset(local_vals, 0, sizeof(int) * num_threads); int j; #pragma omp parallel for num_threads(num_threads) for (j = 0; j < num_threads; j++) { int l2_omp_thread_id = omp_get_thread_num(); ABT_thread l2_abt_thread; ABT_EXIT_IF_FAIL(ABT_thread_self(&l2_abt_thread)); // Context switching in OpenMP. #pragma omp taskyield int l2_omp_thread_id2 = omp_get_thread_num(); if (l2_omp_thread_id == l2_omp_thread_id2) { local_vals[j] += 1; } ABT_thread l2_abt_thread2; ABT_EXIT_IF_FAIL(ABT_thread_self(&l2_abt_thread2)); ABT_bool l2_abt_thread_equal; ABT_EXIT_IF_FAIL(ABT_thread_equal(l2_abt_thread, l2_abt_thread2, &l2_abt_thread_equal)); if (l2_abt_thread_equal == ABT_TRUE) { local_vals[j] += 2; } // Context switching in Argobots. ABT_EXIT_IF_FAIL(ABT_thread_yield()); int l2_omp_thread_id3 = omp_get_thread_num(); if (l2_omp_thread_id2 == l2_omp_thread_id3) { local_vals[j] += 4; } } // Check child threads. int child_fail = 0; for (j = 0; j < num_threads; j++) { if (local_vals[i] != 7) { child_fail = 1; } } if (!child_fail) { vals[i] += 1; } ABT_thread abt_thread2; ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread2)); ABT_bool abt_thread_equal; ABT_EXIT_IF_FAIL(ABT_thread_equal(abt_thread, abt_thread2, &abt_thread_equal)); if (abt_thread_equal == ABT_TRUE) { vals[i] += 2; } } } } int index; for (index = 0; index < num_threads; index++) { if (vals[index] != 3) { printf("vals[%d] == %d\n", index, vals[index]); return 0; } } return 1; } int main() { int i, num_failed = 0; for (i = 0; i < REPETITIONS; i++) { if (!test_task_untied_thread_threadid(i + 1)) { num_failed++; } } return num_failed; }
difrancesco.c
// A Model of Cardiac Electrical Activity Incorporating Ionic Pumps and Concentration Changes // Reference: https://models.physiomeproject.org/exposure/91d93b61d7da56b6baf1f0c4d88ecd77/difrancesco_noble_1985.cellml #include "difrancesco.h" real CONSTANTS[50]; GET_CELL_MODEL_DATA(init_cell_model_data) { if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { static bool first_call = true; if(first_call) { #ifdef _WIN32 printf("Using DiFrancesco & Noble 1985 CPU model\n"); #else print_to_stdout_and_file("Using DiFrancesco & Noble 1985 CPU model\n"); #endif first_call = false; } // Normal sv[0] = -87; // V millivolt sv[1] = 4; // Kc millimolar sv[2] = 140; // Ki millimolar sv[3] = 8; // Nai millimolar sv[4] = 0.2; // y dimensionless sv[5] = 0.01; // x dimensionless sv[6] = 5e-5; // Cai millimolar sv[7] = 1; // s dimensionless sv[8] = 0.01; // m dimensionless sv[9] = 0.8; // h dimensionless sv[10] = 0.005; // d dimensionless sv[11] = 1; // f dimensionless sv[12] = 1; // f2 dimensionless sv[13] = 2; // Ca_up millimolar sv[14] = 1; // Ca_rel millimolar sv[15] = 1; // p dimensionless // Pacing BCL = 300 ms /* sv[0] = -89.523; // V millivolt sv[1] = 4.25602; // Kc millimolar sv[2] = 139.882; // Ki millimolar sv[3] = 8.09539; // Nai millimolar sv[4] = 0.0826828; // y dimensionless sv[5] = 0.194361; // x dimensionless sv[6] = 2.57238e-05; // Cai millimolar sv[7] = 0.420488; // s dimensionless sv[8] = 0.00255133; // m dimensionless sv[9] = 0.986752; // h dimensionless sv[10] = 5.50534e-08; // d dimensionless sv[11] = 1.0; // f dimensionless sv[12] = 0.742853; // f2 dimensionless sv[13] = 2.09383; // Ca_up millimolar sv[14] = 0.205441; // Ca_rel millimolar sv[15] = 0.987422; // p dimensionless */ CONSTANTS[0] = 8314.472; CONSTANTS[1] = 310; CONSTANTS[2] = 96485.3415; CONSTANTS[3] = 0.075; CONSTANTS[4] = 0; CONSTANTS[5] = 3; CONSTANTS[6] = 3; CONSTANTS[7] = 45; CONSTANTS[8] = 140; CONSTANTS[9] = 1e-5; CONSTANTS[10] = 180; CONSTANTS[11] = 920; CONSTANTS[12] = 210; CONSTANTS[13] = 10; CONSTANTS[14] = 0.0005; CONSTANTS[15] = 0.28; CONSTANTS[16] = 0.18; CONSTANTS[17] = 0.02; CONSTANTS[18] = 2; CONSTANTS[19] = 125; CONSTANTS[20] = 1; CONSTANTS[21] = 40; CONSTANTS[22] = 3; CONSTANTS[23] = 0.02; CONSTANTS[24] = 0.001; CONSTANTS[25] = 0.5; CONSTANTS[26] = 750; CONSTANTS[27] = 1e-5; CONSTANTS[28] = 15; CONSTANTS[29] = 0.0001; CONSTANTS[30] = 0.0001; CONSTANTS[31] = 5; CONSTANTS[32] = 0.001; CONSTANTS[33] = 0.05; CONSTANTS[34] = 2; CONSTANTS[35] = 0.1; CONSTANTS[36] = 5; CONSTANTS[37] = 0.001; CONSTANTS[38] = 0.025; CONSTANTS[39] = 2; CONSTANTS[40] = 0.05; CONSTANTS[41] = 2; CONSTANTS[42] = 0.00157; CONSTANTS[43] = 4; CONSTANTS[44] = 0.7; CONSTANTS[45] = ( CONSTANTS[0]*CONSTANTS[1])/CONSTANTS[2]; CONSTANTS[46] = 3.14159*pow(CONSTANTS[33], 2.00000)*CONSTANTS[34]; CONSTANTS[47] = CONSTANTS[46]*(1.00000 - CONSTANTS[35]); CONSTANTS[48] = CONSTANTS[47]*0.0500000; CONSTANTS[49] = CONSTANTS[47]*0.0200000; } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { uint32_t sv_id; int i; // uint32_t *mapping = ((uint32_t*)extra_data); #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = (uint32_t )i; for (int j = 0; j < num_steps; ++j) { solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu(real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] 
= sv[i]; RHS_cpu(rY, rDY, stim_current); // Solve the model using Forward Euler for(int i = 0; i < NEQ; i++) sv[i] = dt*rDY[i] + rY[i]; } void RHS_cpu(const real *sv, real *rDY_, real stim_current) { CONSTANTS[4] = stim_current; real STATES[16]; STATES[0] = sv[0]; STATES[1] = sv[1]; STATES[2] = sv[2]; STATES[3] = sv[3]; STATES[4] = sv[4]; STATES[5] = sv[5]; STATES[6] = sv[6]; STATES[7] = sv[7]; STATES[8] = sv[8]; STATES[9] = sv[9]; STATES[10] = sv[10]; STATES[11] = sv[11]; STATES[12] = sv[12]; STATES[13] = sv[13]; STATES[14] = sv[14]; STATES[15] = sv[15]; double ALGEBRAIC[46]; ALGEBRAIC[8] = ( STATES[6]*CONSTANTS[31])/CONSTANTS[32]; ALGEBRAIC[2] = ( 0.500000*exp( 0.0826000*(STATES[0]+50.0000)))/(1.00000+exp( 0.0570000*(STATES[0]+50.0000))); ALGEBRAIC[12] = ( 1.30000*exp( - 0.0600000*(STATES[0]+20.0000)))/(1.00000+exp( - 0.0400000*(STATES[0]+20.0000))); ALGEBRAIC[3] = 0.0330000*exp(- STATES[0]/17.0000); ALGEBRAIC[13] = 33.0000/(1.00000+exp(- (STATES[0]+10.0000)/8.00000)); ALGEBRAIC[5] = 20.0000*exp( - 0.125000*(STATES[0]+75.0000)); ALGEBRAIC[15] = 2000.00/( 320.000*exp( - 0.100000*(STATES[0]+75.0000))+1.00000); ALGEBRAIC[9] = ( 0.625000*(STATES[0]+34.0000))/(exp((STATES[0]+34.0000)/4.00000) - 1.00000); ALGEBRAIC[18] = 5.00000/(1.00000+exp(( - 1.00000*(STATES[0]+34.0000))/4.00000)); ALGEBRAIC[1] = 0.0500000*exp( - 0.0670000*((STATES[0]+52.0000) - 10.0000)); ALGEBRAIC[11] = (STATES[0]+52.0000) - 10.0000; ALGEBRAIC[19] = (fabs(ALGEBRAIC[11])<CONSTANTS[9] ? 2.50000 : ( 1.00000*ALGEBRAIC[11])/(1.00000 - exp( - 0.200000*ALGEBRAIC[11]))); ALGEBRAIC[4] = STATES[0]+41.0000; ALGEBRAIC[14] = (fabs(ALGEBRAIC[4])<CONSTANTS[27] ? 2000.00 : ( 200.000*ALGEBRAIC[4])/(1.00000 - exp( - 0.100000*ALGEBRAIC[4]))); ALGEBRAIC[20] = 8000.00*exp( - 0.0560000*(STATES[0]+66.0000)); ALGEBRAIC[6] = (STATES[0]+24.0000) - 5.00000; ALGEBRAIC[16] = (fabs(ALGEBRAIC[6])<CONSTANTS[29] ? 120.000 : ( 30.0000*ALGEBRAIC[6])/(1.00000 - exp(( - 1.00000*ALGEBRAIC[6])/4.00000))); ALGEBRAIC[21] = (fabs(ALGEBRAIC[6])<CONSTANTS[29] ? 120.000 : ( 12.0000*ALGEBRAIC[6])/(exp(ALGEBRAIC[6]/10.0000) - 1.00000)); ALGEBRAIC[7] = STATES[0]+34.0000; ALGEBRAIC[17] = (fabs(ALGEBRAIC[7])<CONSTANTS[30] ? 
25.0000 : ( 6.25000*ALGEBRAIC[7])/(exp(ALGEBRAIC[7]/4.00000) - 1.00000)); ALGEBRAIC[22] = 50.0000/(1.00000+exp(( - 1.00000*(STATES[0]+34.0000))/4.00000)); ALGEBRAIC[0] = CONSTANTS[45]*log(CONSTANTS[8]/STATES[3]); ALGEBRAIC[30] = CONSTANTS[16]*(STATES[0] - ALGEBRAIC[0]); ALGEBRAIC[33] = ( (( CONSTANTS[19]*STATES[1])/(CONSTANTS[20]+STATES[1]))*STATES[3])/(CONSTANTS[21]+STATES[3]); ALGEBRAIC[34] = ( CONSTANTS[23]*( exp(( CONSTANTS[25]*(CONSTANTS[22] - 2.00000)*STATES[0])/CONSTANTS[45])*pow(STATES[3], CONSTANTS[22])*CONSTANTS[18] - exp(( (CONSTANTS[25] - 1.00000)*(CONSTANTS[22] - 2.00000)*STATES[0])/CONSTANTS[45])*pow(CONSTANTS[8], CONSTANTS[22])*STATES[6]))/( (1.00000+ CONSTANTS[24]*( STATES[6]*pow(CONSTANTS[8], CONSTANTS[22])+ CONSTANTS[18]*pow(STATES[3], CONSTANTS[22])))*(1.00000+STATES[6]/0.00690000)); ALGEBRAIC[35] = CONSTANTS[45]*log((CONSTANTS[8]+ 0.120000*STATES[1])/(STATES[3]+ 0.120000*STATES[2])); ALGEBRAIC[36] = CONSTANTS[26]*pow(STATES[8], 3.00000)*STATES[9]*(STATES[0] - ALGEBRAIC[35]); ALGEBRAIC[23] = (( STATES[4]*STATES[1])/(STATES[1]+CONSTANTS[7]))*CONSTANTS[5]*(STATES[0] - ALGEBRAIC[0]); ALGEBRAIC[40] = (( 0.0100000*CONSTANTS[28]*(STATES[0] - 50.0000))/( CONSTANTS[45]*(1.00000 - exp(( - 1.00000*(STATES[0] - 50.0000))/CONSTANTS[45]))))*( STATES[3]*exp(50.0000/CONSTANTS[45]) - CONSTANTS[8]*exp(( - 1.00000*(STATES[0] - 50.0000))/CONSTANTS[45]))*STATES[10]*STATES[11]*STATES[12]; ALGEBRAIC[39] = (( 2.00000*1.00000*CONSTANTS[47]*CONSTANTS[2])/( 1.00000*CONSTANTS[38]*CONSTANTS[36]))*STATES[6]*(CONSTANTS[36] - STATES[13]); ALGEBRAIC[41] = (( 2.00000*1.00000*CONSTANTS[49]*CONSTANTS[2])/( 1.00000*CONSTANTS[39]))*STATES[15]*(STATES[13] - STATES[14]); ALGEBRAIC[26] = ( CONSTANTS[10]*(STATES[2] - STATES[1]*exp(- STATES[0]/CONSTANTS[45])))/140.000; ALGEBRAIC[27] = STATES[5]*ALGEBRAIC[26]; ALGEBRAIC[10] = CONSTANTS[45]*log(STATES[1]/STATES[2]); ALGEBRAIC[28] = ( (( CONSTANTS[11]*STATES[1])/(STATES[1]+CONSTANTS[12]))*(STATES[0] - ALGEBRAIC[10]))/(1.00000+exp(( ((STATES[0]+10.0000) - ALGEBRAIC[10])*2.00000)/CONSTANTS[45])); ALGEBRAIC[29] = (( (( STATES[7]*CONSTANTS[15]*(0.200000+STATES[1]/(CONSTANTS[13]+STATES[1]))*STATES[6])/(CONSTANTS[14]+STATES[6]))*(STATES[0]+10.0000))/(1.00000 - exp( - 0.200000*(STATES[0]+10.0000))))*( STATES[2]*exp(( 0.500000*STATES[0])/CONSTANTS[45]) - STATES[1]*exp(( - 0.500000*STATES[0])/CONSTANTS[45])); ALGEBRAIC[24] = (( STATES[4]*STATES[1])/(STATES[1]+CONSTANTS[7]))*CONSTANTS[6]*(STATES[0] - ALGEBRAIC[10]); ALGEBRAIC[38] = (( 0.0100000*CONSTANTS[28]*(STATES[0] - 50.0000))/( CONSTANTS[45]*(1.00000 - exp(( - 1.00000*(STATES[0] - 50.0000))/CONSTANTS[45]))))*( STATES[2]*exp(50.0000/CONSTANTS[45]) - STATES[1]*exp(( - 1.00000*(STATES[0] - 50.0000))/CONSTANTS[45]))*STATES[10]*STATES[11]*STATES[12]; ALGEBRAIC[42] = (ALGEBRAIC[28]+ALGEBRAIC[27]+ALGEBRAIC[24]+ALGEBRAIC[38]+ALGEBRAIC[29]) - 2.00000*ALGEBRAIC[33]; ALGEBRAIC[25] = ALGEBRAIC[23]+ALGEBRAIC[24]; ALGEBRAIC[31] = 0.500000*CONSTANTS[45]*log(CONSTANTS[18]/STATES[6]); ALGEBRAIC[32] = CONSTANTS[17]*(STATES[0] - ALGEBRAIC[31]); ALGEBRAIC[37] = (( 4.00000*CONSTANTS[28]*(STATES[0] - 50.0000))/( CONSTANTS[45]*(1.00000 - exp(( - 1.00000*(STATES[0] - 50.0000)*2.00000)/CONSTANTS[45]))))*( STATES[6]*exp(100.000/CONSTANTS[45]) - CONSTANTS[18]*exp(( - 2.00000*(STATES[0] - 50.0000))/CONSTANTS[45]))*STATES[10]*STATES[11]*STATES[12]; ALGEBRAIC[43] = ALGEBRAIC[37]+ALGEBRAIC[38]+ALGEBRAIC[40]; ALGEBRAIC[44] = ( (( 2.00000*1.00000*CONSTANTS[49]*CONSTANTS[2])/( 1.00000*CONSTANTS[40]))*STATES[14]*pow(STATES[6], 
CONSTANTS[41]))/(pow(STATES[6], CONSTANTS[41])+pow(CONSTANTS[37], CONSTANTS[41])); double RATES[16]; RATES[0] = (- (ALGEBRAIC[25]+ALGEBRAIC[27]+ALGEBRAIC[28]+ALGEBRAIC[29]+ALGEBRAIC[30]+ALGEBRAIC[32]+ALGEBRAIC[33]+ALGEBRAIC[34]+ALGEBRAIC[36]+ALGEBRAIC[43]+CONSTANTS[4])/CONSTANTS[3]) * 1.0E-03; RATES[1] = (- CONSTANTS[44]*(STATES[1] - CONSTANTS[43])+( 1.00000*ALGEBRAIC[42])/( 1.00000*CONSTANTS[42]*CONSTANTS[2])) * 1.0E-03; RATES[2] = ( ( - 1.00000*ALGEBRAIC[42])/( 1.00000*CONSTANTS[47]*CONSTANTS[2]) ) * 1.0E-03; RATES[3] = ( ( - 1.00000*(ALGEBRAIC[36]+ALGEBRAIC[30]+ALGEBRAIC[23]+ALGEBRAIC[40]+ ALGEBRAIC[33]*3.00000+( ALGEBRAIC[34]*CONSTANTS[22])/(CONSTANTS[22] - 2.00000)))/( 1.00000*CONSTANTS[47]*CONSTANTS[2])) * 1.0E-03; RATES[4] = (ALGEBRAIC[1]*(1.00000 - STATES[4]) - ALGEBRAIC[19]*STATES[4]) * 1.0E-03; RATES[5] = (ALGEBRAIC[2]*(1.00000 - STATES[5]) - ALGEBRAIC[12]*STATES[5]) * 1.0E-03; RATES[6] = (( - 1.00000*((((ALGEBRAIC[37]+ALGEBRAIC[32]) - ( 2.00000*ALGEBRAIC[34])/(CONSTANTS[22] - 2.00000)) - ALGEBRAIC[44])+ALGEBRAIC[39]))/( 2.00000*1.00000*CONSTANTS[47]*CONSTANTS[2])) * 1.0E-03; RATES[7] = (ALGEBRAIC[3]*(1.00000 - STATES[7]) - ALGEBRAIC[13]*STATES[7]) * 1.0E-03; RATES[8] = (ALGEBRAIC[14]*(1.00000 - STATES[8]) - ALGEBRAIC[20]*STATES[8]) * 1.0E-03; RATES[9] = (ALGEBRAIC[5]*(1.00000 - STATES[9]) - ALGEBRAIC[15]*STATES[9]) * 1.0E-03; RATES[10] = (ALGEBRAIC[16]*(1.00000 - STATES[10]) - ALGEBRAIC[21]*STATES[10]) * 1.0E-03; RATES[11] = (ALGEBRAIC[17]*(1.00000 - STATES[11]) - ALGEBRAIC[22]*STATES[11]) * 1.0E-03; RATES[12] = (CONSTANTS[31] - STATES[12]*(CONSTANTS[31]+ALGEBRAIC[8])) * 1.0E-03; RATES[13] = (( 1.00000*(ALGEBRAIC[39] - ALGEBRAIC[41]))/( 2.00000*1.00000*CONSTANTS[48]*CONSTANTS[2])) * 1.0E-03; RATES[14] = (( 1.00000*(ALGEBRAIC[41] - ALGEBRAIC[44]))/( 2.00000*1.00000*CONSTANTS[49]*CONSTANTS[2])) * 1.0E-03; RATES[15] = (ALGEBRAIC[9]*(1.00000 - STATES[15]) - ALGEBRAIC[18]*STATES[15]) * 1.0E-03; rDY_[0] = RATES[0]; rDY_[1] = RATES[1]; rDY_[2] = RATES[2]; rDY_[3] = RATES[3]; rDY_[4] = RATES[4]; rDY_[5] = RATES[5]; rDY_[6] = RATES[6]; rDY_[7] = RATES[7]; rDY_[8] = RATES[8]; rDY_[9] = RATES[9]; rDY_[10] = RATES[10]; rDY_[11] = RATES[11]; rDY_[12] = RATES[12]; rDY_[13] = RATES[13]; rDY_[14] = RATES[14]; rDY_[15] = RATES[15]; //printf("\nInside\n"); //for (uint32_t i = 0; i < 10; i++) // printf("%g\n",sv[i]); /* //State variables const real V_old_ = sv[0]; const real kc_old_ = sv[1]; const real ki_old_ = sv[2]; const real nai_old_ = sv[3]; const real y_old_ = sv[4]; const real x_old_ = sv[5]; const real cai_old_ = sv[6]; const real s_old_ = sv[7]; const real m_old_ = sv[8]; const real h_old_ = sv[9]; const real d_old_ = sv[10]; const real f_old_ = sv[11]; const real f2_old_ = sv[12]; const real ca_up_old_ = sv[13]; const real ca_rel_old_ = sv[14]; const real p_old_ = sv[15]; //Parameters const real R = 8314.472; const real T = 310; const real F = 96485.3415; const real C = 0.075; const real i_pulse = 0; const real g_f_Na = 3; const real g_f_K = 3; const real Km_f = 45; const real Nao = 140; const real delta_y = 1e-5; const real i_K_max = 180; const real g_K1 = 920; const real Km_K1 = 210; const real Km_to = 10; const real Km_Ca = 0.0005; const real g_to = 0.28; const real g_Nab = 0.18; const real g_Cab = 0.02; const real Cao = 2; const real I_p = 125; const real K_mK = 1; const real K_mNa = 40; const real n_NaCa = 3; const real K_NaCa = 0.02; const real d_NaCa = 0.001; const real gamma = 0.5; const real g_Na = 750; const real delta_m = 1e-5; const real P_si = 15; const real delta_d = 
0.0001; const real delta_f = 0.0001; const real alpha_f2 = 5; const real K_mf2 = 0.001; const real radius = 0.05; const real length = 2; const real V_e_ratio = 0.1; const real Ca_up_max = 5; const real K_mCa = 0.001; const real tau_up = 0.025; const real tau_rep = 2; const real tau_rel = 0.05; const real rCa = 2; const real Ve = 0.00157; const real Kb = 4; const real pf = 0.7; const real RTONF = ( R*T)/F; const real V_cell = 3.14159*pow(radius, 2.00000)*length; const real Vi = V_cell*(1.00000 - V_e_ratio); const real V_up = Vi*0.0500000; const real V_rel = Vi*0.0200000; // Algebraics const real beta_f2 = ( cai_old_*alpha_f2)/K_mf2; const real alpha_x = ( 0.500000*exp( 0.0826000*(V_old_+50.0000)))/(1.00000+exp( 0.0570000*(V_old_+50.0000))); const real beta_x = ( 1.30000*exp( - 0.0600000*(V_old_+20.0000)))/(1.00000+exp( - 0.0400000*(V_old_+20.0000))); const real alpha_s = 0.0330000*exp(- V_old_/17.0000); const real beta_s = 33.0000/(1.00000+exp(- (V_old_+10.0000)/8.00000)); const real alpha_h = 20.0000*exp( - 0.125000*(V_old_+75.0000)); const real beta_h = 2000.00/( 320.000*exp( - 0.100000*(V_old_+75.0000))+1.00000); const real alpha_p = ( 0.625000*(V_old_+34.0000))/(exp((V_old_+34.0000)/4.00000) - 1.00000); const real beta_p = 5.00000/(1.00000+exp(( - 1.00000*(V_old_+34.0000))/4.00000)); const real alpha_y = 0.0500000*exp( - 0.0670000*((V_old_+52.0000) - 10.0000)); const real E0_y = (V_old_+52.0000) - 10.0000; const real beta_y = (fabs(E0_y)<delta_y ? 2.50000 : ( 1.00000*E0_y)/(1.00000 - exp( - 0.200000*E0_y))); const real E0_m = V_old_+41.0000; const real alpha_m = (fabs(E0_m)<delta_m ? 2000.00 : ( 200.000*E0_m)/(1.00000 - exp( - 0.100000*E0_m))); const real beta_m = 8000.00*exp( - 0.0560000*(V_old_+66.0000)); const real E0_d = (V_old_+24.0000) - 5.00000; const real alpha_d = (fabs(E0_d)<delta_d ? 120.000 : ( 30.0000*E0_d)/(1.00000 - exp(( - 1.00000*E0_d)/4.00000))); const real beta_d = (fabs(E0_d)<delta_d ? 120.000 : ( 12.0000*E0_d)/(exp(E0_d/10.0000) - 1.00000)); const real E0_f = V_old_+34.0000; const real alpha_f = (fabs(E0_f)<delta_f ? 
25.0000 : ( 6.25000*E0_f)/(exp(E0_f/4.00000) - 1.00000)); const real beta_f = 50.0000/(1.00000+exp(( - 1.00000*(V_old_+34.0000))/4.00000)); const real E_Na = RTONF*log(Nao/nai_old_); const real i_Na_b = g_Nab*(V_old_ - E_Na); const real i_p = ( (( I_p*kc_old_)/(K_mK+kc_old_))*nai_old_)/(K_mNa+nai_old_); const real i_NaCa = ( K_NaCa*( exp(( gamma*(n_NaCa - 2.00000)*V_old_)/RTONF)*pow(nai_old_, n_NaCa)*Cao - exp(( (gamma - 1.00000)*(n_NaCa - 2.00000)*V_old_)/RTONF)*pow(Nao, n_NaCa)*cai_old_))/( (1.00000+ d_NaCa*( cai_old_*pow(Nao, n_NaCa)+ Cao*pow(nai_old_, n_NaCa)))*(1.00000+cai_old_/0.00690000)); const real E_mh = RTONF*log((Nao+ 0.120000*kc_old_)/(nai_old_+ 0.120000*ki_old_)); const real i_Na = g_Na*pow(m_old_, 3.00000)*h_old_*(V_old_ - E_mh); const real i_fNa = (( y_old_*kc_old_)/(kc_old_+Km_f))*g_f_Na*(V_old_ - E_Na); const real i_siNa = (( 0.0100000*P_si*(V_old_ - 50.0000))/( RTONF*(1.00000 - exp(( - 1.00000*(V_old_ - 50.0000))/RTONF))))*( nai_old_*exp(50.0000/RTONF) - Nao*exp(( - 1.00000*(V_old_ - 50.0000))/RTONF))*d_old_*f_old_*f2_old_; const real i_up = (( 2.00000*1.00000*Vi*F)/( 1.00000*tau_up*Ca_up_max))*cai_old_*(Ca_up_max - ca_up_old_); const real i_tr = (( 2.00000*1.00000*V_rel*F)/( 1.00000*tau_rep))*p_old_*(ca_up_old_ - ca_rel_old_); const real I_K = ( i_K_max*(ki_old_ - kc_old_*exp(- V_old_/RTONF)))/140.000; const real i_K = x_old_*I_K; const real E_K = RTONF*log(kc_old_/ki_old_); const real i_K1 = ( (( g_K1*kc_old_)/(kc_old_+Km_K1))*(V_old_ - E_K))/(1.00000+exp(( ((V_old_+10.0000) - E_K)*2.00000)/RTONF)); const real i_to = (( (( s_old_*g_to*(0.200000+kc_old_/(Km_to+kc_old_))*cai_old_)/(Km_Ca+cai_old_))*(V_old_+10.0000))/(1.00000 - exp( - 0.200000*(V_old_+10.0000))))*( ki_old_*exp(( 0.500000*V_old_)/RTONF) - kc_old_*exp(( - 0.500000*V_old_)/RTONF)); const real i_fK = (( y_old_*kc_old_)/(kc_old_+Km_f))*g_f_K*(V_old_ - E_K); const real i_siK = (( 0.0100000*P_si*(V_old_ - 50.0000))/( RTONF*(1.00000 - exp(( - 1.00000*(V_old_ - 50.0000))/RTONF))))*( ki_old_*exp(50.0000/RTONF) - kc_old_*exp(( - 1.00000*(V_old_ - 50.0000))/RTONF))*d_old_*f_old_*f2_old_; const real i_mK = (i_K1+i_K+i_fK+i_siK+i_to) - 2.00000*i_p; const real i_f = i_fNa+i_fK; const real E_Ca = 0.500000*RTONF*log(Cao/cai_old_); const real i_Ca_b = g_Cab*(V_old_ - E_Ca); const real i_siCa = (( 4.00000*P_si*(V_old_ - 50.0000))/( RTONF*(1.00000 - exp(( - 1.00000*(V_old_ - 50.0000)*2.00000)/RTONF))))*( cai_old_*exp(100.000/RTONF) - Cao*exp(( - 2.00000*(V_old_ - 50.0000))/RTONF))*d_old_*f_old_*f2_old_; const real i_si = i_siCa+i_siK+i_siNa; const real i_rel = ( (( 2.00000*1.00000*V_rel*F)/( 1.00000*tau_rel))*ca_rel_old_*pow(cai_old_, rCa))/(pow(cai_old_, rCa)+pow(K_mCa, rCa)); // Rates of change rDY_[0] = - (i_f+i_K+i_K1+i_to+i_Na_b+i_Ca_b+i_p+i_NaCa+i_Na+i_si+i_pulse)/C; rDY_[1] = - pf*(kc_old_ - Kb)+( 1.00000*i_mK)/( 1.00000*Ve*F); rDY_[2] = ( - 1.00000*i_mK)/( 1.00000*Vi*F); rDY_[3] = ( - 1.00000*(i_Na+i_Na_b+i_fNa+i_siNa+ i_p*3.00000+( i_NaCa*n_NaCa)/(n_NaCa - 2.00000)))/( 1.00000*Vi*F); rDY_[4] = alpha_y*(1.00000 - y_old_) - beta_y*y_old_; rDY_[5] = alpha_x*(1.00000 - x_old_) - beta_x*x_old_; rDY_[6] = ( - 1.00000*((((i_siCa+i_Ca_b) - ( 2.00000*i_NaCa)/(n_NaCa - 2.00000)) - i_rel)+i_up))/( 2.00000*1.00000*Vi*F); rDY_[7] = alpha_s*(1.00000 - s_old_) - beta_s*s_old_; rDY_[8] = alpha_m*(1.00000 - m_old_) - beta_m*m_old_; rDY_[9] = alpha_h*(1.00000 - h_old_) - beta_h*h_old_; rDY_[10] = alpha_d*(1.00000 - d_old_) - beta_d*d_old_; rDY_[11] = alpha_f*(1.00000 - f_old_) - beta_f*f_old_; rDY_[12] = alpha_f2 - 
f2_old_*(alpha_f2+beta_f2); rDY_[13] = ( 1.00000*(i_up - i_tr))/( 2.00000*1.00000*V_up*F); rDY_[14] = ( 1.00000*(i_tr - i_rel))/( 2.00000*1.00000*V_rel*F); rDY_[15] = alpha_p*(1.00000 - p_old_) - beta_p*p_old_; */ }
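solve_model_ode_cpu() advances all 16 state variables with a forward Euler step, sv[i] = sv[i] + dt * rDY[i]. A tiny self-contained illustration of the same update rule applied to the toy ODE dy/dt = -y (exact solution exp(-t)); the values and function name are illustrative only.

#include <stdio.h>

int euler_sketch_main(void)
{
    double y = 1.0, dt = 0.001;
    for (int step = 0; step < 1000; step++) {   /* integrate up to t = 1 */
        double dydt = -y;                       /* right-hand side f(y)  */
        y = y + dt * dydt;                      /* forward Euler update  */
    }
    printf("y(1) ~ %f (exact 0.367879)\n", y);  /* ~0.3677 for dt = 1e-3 */
    return 0;
}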
convolution_packnto1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convolution_packnto1_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packnto1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt) { const int packn = csrr_vlenb() / 4; const word_type vl = vsetvl_e32m1(packn); int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; // kernel offsets std::vector<int> _space_ofs(maxk); int* space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } const float* bias_data_ptr = bias_data; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0.f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } vfloat32m1_t _sum = vfmv_v_f_f32m1(0.f, vl); const float* kptr = (const float*)weight_data_packnto1.channel(p); // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const float* sptr = m.row(i * stride_h) + j * stride_w * packn; for (int k = 0; k < maxk; k++) { vfloat32m1_t _val = vle32_v_f32m1(sptr + space_ofs[k] * packn, vl); vfloat32m1_t _w = vle32_v_f32m1(kptr, vl); _sum = vfmacc_vv_f32m1(_sum, _val, _w, vl); kptr += packn; } } sum = vfmv_f_s_f32m1_f32(vfredusum_vs_f32m1_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl)); sum = activation_ss(sum, activation_type, activation_params); outptr[j] = sum; } outptr += outw; } } }
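For reference, a plain scalar version of the per-output-pixel sum the vectorised kernel above computes (1 input channel, 1 output channel, stride 1, dilation 1, no padding or activation); the sizes, data, and function name are made up for illustration.

#include <stdio.h>

int conv_sketch_main(void)
{
    enum { W = 4, H = 4, KW = 3, KH = 3 };
    float in[H][W], w[KH][KW], bias = 0.5f;
    for (int i = 0; i < H; i++)
        for (int j = 0; j < W; j++)
            in[i][j] = (float)(i + j);
    for (int i = 0; i < KH; i++)
        for (int j = 0; j < KW; j++)
            w[i][j] = 1.0f / 9.0f;               /* 3x3 box filter */

    /* out(i,j) = bias + sum over the kernel window of input * weight */
    for (int oi = 0; oi < H - KH + 1; oi++) {
        for (int oj = 0; oj < W - KW + 1; oj++) {
            float sum = bias;
            for (int ki = 0; ki < KH; ki++)
                for (int kj = 0; kj < KW; kj++)
                    sum += in[oi + ki][oj + kj] * w[ki][kj];
            printf("out[%d][%d] = %g\n", oi, oj, sum);
        }
    }
    return 0;
}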
GB_unaryop__abs_fp64_uint16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_fp64_uint16 // op(A') function: GB_tran__abs_fp64_uint16 // C type: double // A type: uint16_t // cast: double cij = (double) aij // unaryop: cij = fabs (aij) #define GB_ATYPE \ uint16_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = fabs (x) ; // casting #define GB_CASTING(z, aij) \ double z = (double) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_fp64_uint16 ( double *Cx, // Cx and Ax may be aliased uint16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_fp64_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
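All of the generated "apply" kernels share one shape: cast each entry of Ax to the output type, apply the unary op, and store into Cx. A stand-alone sketch with the same typecast and op as this file (uint16_t to double, fabs), parallelised the same way; the function names here are illustrative.

#include <math.h>
#include <stdint.h>
#include <stdio.h>

static void apply_abs_sketch(double *Cx, const uint16_t *Ax,
                             int64_t anz, int nthreads)
{
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++) {
        double z = (double) Ax[p];   /* cast:    double cij = (double) aij */
        Cx[p] = fabs(z);             /* unaryop: cij = fabs (aij)          */
    }
}

int apply_abs_sketch_main(void)
{
    uint16_t Ax[4] = {0, 1, 2, 65535};
    double Cx[4];
    apply_abs_sketch(Cx, Ax, 4, 2);
    for (int i = 0; i < 4; i++) printf("%g ", Cx[i]);
    printf("\n");
    return 0;
}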
GB_binop__lxor_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lxor_uint16) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__lxor_uint16) // A.*B function (eWiseMult): GB (_AemultB_03__lxor_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_uint16) // A*D function (colscale): GB (_AxD__lxor_uint16) // D*A function (rowscale): GB (_DxB__lxor_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__lxor_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__lxor_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_uint16) // C=scalar+B GB (_bind1st__lxor_uint16) // C=scalar+B' GB (_bind1st_tran__lxor_uint16) // C=A+scalar GB (_bind2nd__lxor_uint16) // C=A'+scalar GB (_bind2nd_tran__lxor_uint16) // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = ((aij != 0) != (bij != 0)) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ((x != 0) != (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LXOR || GxB_NO_UINT16 || GxB_NO_LXOR_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__lxor_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lxor_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lxor_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lxor_uint16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__lxor_uint16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__lxor_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return 
(GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__lxor_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__lxor_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__lxor_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__lxor_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__lxor_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = Bx [p] ; Cx [p] = ((x != 0) != (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__lxor_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = Ax [p] ; Cx [p] = ((aij != 0) != (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = ((x != 0) != (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__lxor_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, 
int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) != (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__lxor_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
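The LXOR operator used throughout this file treats its operands as booleans: cij = ((aij != 0) != (bij != 0)). A tiny truth-table check (function name illustrative).

#include <stdint.h>
#include <stdio.h>

int lxor_sketch_main(void)
{
    uint16_t vals[4][2] = {{0, 0}, {0, 7}, {3, 0}, {3, 7}};
    for (int k = 0; k < 4; k++) {
        uint16_t a = vals[k][0], b = vals[k][1];
        uint16_t c = ((a != 0) != (b != 0));   /* yields 0, 1, 1, 0 */
        printf("lxor(%u,%u) = %u\n", a, b, c);
    }
    return 0;
}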
rose_v1_scalar_privatization.c
/* a local variable to transfer temp value * It introduces "fake" data dependence since the variable is local to each iteration * */ #include <omp.h> int a[100]; int b[100]; void foo() { int i; #pragma omp parallel for private (i) for (i = 0; i <= 99; i += 1) { int tmp; tmp = a[i] + i; b[i] = tmp; } } /* *-------------Dump the dependence graph for the first loop in a function body!------------ // Output dependence // Loop-carried ,why CarryLevel =1???? dep SgExprStatement:tmp =((a[i]) + i); SgExprStatement:tmp =((a[i]) + i); 1*1 SCALAR_DEP; commonlevel = 1 CarryLevel = 1 SgVarRefExp:tmp@11:9->SgVarRefExp:tmp@11:9 == 0;||:: //True dependence for both //a) loop independent (within an iteration) and //b) loop carried (across iterations) : This is sure thing if a) holds dep SgExprStatement:tmp =((a[i]) + i); SgExprStatement:b[i] = tmp; 1*1 SCALAR_DEP; commonlevel = 1 CarryLevel = 1 SgVarRefExp:tmp@11:9->SgVarRefExp:tmp@12:12 == 0;||:: //Anti dependence //Loop carried(BACK_DEP) scalar dep SgExprStatement:b[i] = tmp; SgExprStatement:tmp =((a[i]) + i); 1*1 SCALAR_BACK_DEP; commonlevel = 1 CarryLevel = 0 SgVarRefExp:tmp@12:12->SgVarRefExp:tmp@11:9 <= -1;||:: */
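To make the "fake" dependence concrete: the sketch below contrasts a shared tmp declared outside the loop, which would race if the loop were parallelised as-is, with the privatised form equivalent to foo() above. The a2/b2 arrays and function names are introduced only for this illustration.

#include <omp.h>

int a2[100];
int b2[100];

/* tmp is shared across iterations here, so every iteration writes and then
 * reads the same location; do NOT add a parallel for to this version as-is. */
void foo_shared_tmp_racy(void)
{
    int i;
    int tmp;
    for (i = 0; i <= 99; i += 1) {
        tmp = a2[i] + i;
        b2[i] = tmp;
    }
}

/* Listing tmp in private() (or declaring it inside the loop body, as foo()
 * does) gives each iteration its own copy and removes the dependence. */
void foo_private_tmp(void)
{
    int i, tmp;
#pragma omp parallel for private(i, tmp)
    for (i = 0; i <= 99; i += 1) {
        tmp = a2[i] + i;
        b2[i] = tmp;
    }
}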
GrB_Scalar_nvals.c
//------------------------------------------------------------------------------ // GrB_Scalar_nvals: number of entries in a sparse GrB_Scalar //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB.h" GrB_Info GrB_Scalar_nvals // get the number of entries in a GrB_Scalar ( GrB_Index *nvals, // number of entries (1 or 0) const GrB_Scalar s // GrB_Scalar to query ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE1 ("GrB_Scalar_nvals (&nvals, s)") ; GB_RETURN_IF_NULL_OR_FAULTY (s) ; ASSERT (GB_SCALAR_OK (s)) ; //-------------------------------------------------------------------------- // get the number of entries //-------------------------------------------------------------------------- GrB_Info info = GB_nvals (nvals, (GrB_Matrix) s, Context) ; #pragma omp flush return (info) ; } //------------------------------------------------------------------------------ // GxB_Scalar_nvals: number of entries in a sparse GrB_Scalar (historical) //------------------------------------------------------------------------------ GrB_Info GxB_Scalar_nvals // get the number of entries in a GrB_Scalar ( GrB_Index *nvals, // number of entries (1 or 0) const GrB_Scalar s // GrB_Scalar to query ) { return (GrB_Scalar_nvals (nvals, s)) ; }
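A usage sketch for GrB_Scalar_nvals, assuming a SuiteSparse-style GraphBLAS.h with the v2.0 GrB_Scalar API (GrB_Scalar_new, GrB_Scalar_setElement_FP64); error handling is omitted and the function name is illustrative.

#include <inttypes.h>
#include <stdio.h>
#include "GraphBLAS.h"

int scalar_nvals_sketch_main(void)
{
    GrB_init(GrB_NONBLOCKING);

    GrB_Scalar s;
    GrB_Index nvals = 42;

    GrB_Scalar_new(&s, GrB_FP64);
    GrB_Scalar_nvals(&nvals, s);            /* empty scalar -> 0        */
    printf("nvals = %" PRIu64 "\n", (uint64_t) nvals);

    GrB_Scalar_setElement_FP64(s, 3.14);
    GrB_Scalar_nvals(&nvals, s);            /* one stored entry -> 1    */
    printf("nvals = %" PRIu64 "\n", (uint64_t) nvals);

    GrB_Scalar_free(&s);
    GrB_finalize();
    return 0;
}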
GB_binop__isge_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isge_fp64 // A.*B function (eWiseMult): GB_AemultB__isge_fp64 // A*D function (colscale): GB_AxD__isge_fp64 // D*A function (rowscale): GB_DxB__isge_fp64 // C+=B function (dense accum): GB_Cdense_accumB__isge_fp64 // C+=b function (dense accum): GB_Cdense_accumb__isge_fp64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isge_fp64 // C=scalar+B GB_bind1st__isge_fp64 // C=scalar+B' GB_bind1st_tran__isge_fp64 // C=A+scalar GB_bind2nd__isge_fp64 // C=A'+scalar GB_bind2nd_tran__isge_fp64 // C type: double // A type: double // B,b type: double // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ double bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x >= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_FP64 || GxB_NO_ISGE_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isge_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isge_fp64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isge_fp64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isge_fp64 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isge_fp64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *GB_RESTRICT Cx = (double *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isge_fp64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isge_fp64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isge_fp64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double bij = Bx [p] ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isge_fp64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB_bind1st_tran__isge_fp64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = Ax [pA] ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB_bind2nd_tran__isge_fp64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
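/* A standalone sketch of what the generated bind2nd kernel above reduces to
   for ISGE_FP64 once the macros are expanded: z = (x >= y) applied entrywise
   with the scalar bound to the second argument, producing 1.0 or 0.0 in the
   double output. The function name is illustrative, not a GraphBLAS symbol. */
#include <stdint.h>

static void isge_fp64_bind2nd(double *Cx, const double *Ax, double y,
                              int64_t anz, int nthreads) {
  int64_t p;
  #pragma omp parallel for num_threads(nthreads) schedule(static)
  for (p = 0; p < anz; p++) {
    double aij = Ax[p];
    Cx[p] = (aij >= y);   /* GB_BINOP: z = (x >= y) */
  }
}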
main.c
#include <unistd.h> #include <omp.h> #include <stdlib.h> #include <stdio.h> #include <string.h> int N_BUCKETS; struct Bucket { int* array; size_t n_elem; size_t max_elem; }; struct Bucket* make(size_t max){ struct Bucket* block_arr = malloc(sizeof(struct Bucket)); block_arr->array = malloc(sizeof(int) * max); block_arr->n_elem = 0; block_arr->max_elem = max; return block_arr; } void free_bucket(struct Bucket* b){ free(b->array); free(b); } __inline__ void buckets_to_arr(size_t size, struct Bucket* arr, int* res) { memcpy(res, arr->array, arr->n_elem * sizeof(int)); } void insert(struct Bucket* arr, int elem){ if(arr->n_elem >= arr->max_elem){ arr->array = realloc(arr->array, sizeof(int) * arr->max_elem * 2); arr->max_elem *= 2; } arr->array[arr->n_elem] = elem; arr->n_elem++; } int cmpfunc (const void * a, const void * b) { return ( *(int*)a - *(int*)b ); } void bucket_sort(size_t size, int* arr){ if (!size){ fprintf(stderr, "Can't calculate max of empty array"); _exit(1); } int max = arr[0]; int min = arr[0]; for(size_t i = 1; i < size; i++) { //vectorized if(arr[i] > max) max = arr[i]; if(arr[i] < min) min = arr[i]; } #ifdef NDEBUG fprintf(stderr, "max: %d, min: %d\n", max, min); #endif struct Bucket** buckets; #pragma omp parallel { #pragma omp single { N_BUCKETS = omp_get_num_threads() * 2; N_BUCKETS = N_BUCKETS > 32 ? 32 : N_BUCKETS; buckets = malloc(sizeof(struct Bucket*) * N_BUCKETS); for (size_t i = 0; i < N_BUCKETS; i++) buckets[i] = make(size); } #pragma omp for for (size_t i = 0; i < N_BUCKETS; i++) { for (size_t j = 0; j < size; j++){ size_t n_bucket = (arr[j] + abs(min)) * N_BUCKETS / (abs(max + abs(min))); n_bucket = n_bucket >= N_BUCKETS ? N_BUCKETS - 1 : n_bucket; if(n_bucket == i) { #ifdef NDEBUG fprintf(stderr, "%zu<- %d\n", i, arr[j]); #endif buckets[i]->array[buckets[i]->n_elem++] = arr[j]; } } qsort(buckets[i]->array, buckets[i]->n_elem, sizeof(int), cmpfunc); } } size_t i = 0; for(size_t j = 0; j < N_BUCKETS; j++) { memcpy(arr + i, buckets[j]->array, buckets[j]->n_elem * sizeof(int)); i += buckets[j]->n_elem; } } void print_arr(int* arr, size_t size){ for(size_t i = 0; i < size; i++) { arr[i] = arr[i]; printf("%d\n", arr[i]); } } int main(int argc, char** argv){ struct Bucket* b = make(100); FILE* file = fopen (argv[1], "r"); int v; while (!feof (file)){ fscanf (file, "%d\n", &v); insert(b, v); } size_t size = b->n_elem; #ifdef NDEBUG print_arr(b->array, size); #endif double time = omp_get_wtime(); bucket_sort(size, b->array); fprintf(stderr, "Time=%f\n", omp_get_wtime()-time); print_arr(b->array, size); free_bucket(b); }
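/* A minimal sketch of the value-to-bucket mapping used in bucket_sort above,
   written with a guarded range so it stays well defined when min is positive
   or when all inputs are equal (the original expression divides by
   abs(max + abs(min))). bucket_index is an illustrative helper, not part of
   the program above. */
#include <stddef.h>

static size_t bucket_index(int value, int min, int max, size_t n_buckets) {
  long long range  = (long long) max - (long long) min + 1;    /* always >= 1 */
  long long offset = (long long) value - (long long) min;      /* 0 .. range-1 */
  size_t idx = (size_t) (offset * (long long) n_buckets / range);
  return idx < n_buckets ? idx : n_buckets - 1;                 /* clamp, as above */
}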
feature_group.h
/*! * Copyright (c) 2017 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for * license information. */ #ifndef LIGHTGBM_FEATURE_GROUP_H_ #define LIGHTGBM_FEATURE_GROUP_H_ #include <LightGBM/bin.h> #include <LightGBM/meta.h> #include <LightGBM/utils/random.h> #include <cstdio> #include <memory> #include <vector> namespace LightGBM { class Dataset; class DatasetLoader; struct TrainingShareStates; class MultiValBinWrapper; /*! \brief Using to store data and providing some operations on one feature * group*/ class FeatureGroup { public: friend Dataset; friend DatasetLoader; friend TrainingShareStates; friend MultiValBinWrapper; /*! * \brief Constructor * \param num_feature number of features of this group * \param bin_mappers Bin mapper for features * \param num_data Total number of data * \param is_enable_sparse True if enable sparse feature */ FeatureGroup(int num_feature, int8_t is_multi_val, std::vector<std::unique_ptr<BinMapper>>* bin_mappers, data_size_t num_data, int group_id) : num_feature_(num_feature), is_multi_val_(is_multi_val > 0), is_sparse_(false) { CHECK_EQ(static_cast<int>(bin_mappers->size()), num_feature); auto& ref_bin_mappers = *bin_mappers; double sum_sparse_rate = 0.0f; for (int i = 0; i < num_feature_; ++i) { bin_mappers_.emplace_back(ref_bin_mappers[i].release()); sum_sparse_rate += bin_mappers_.back()->sparse_rate(); } sum_sparse_rate /= num_feature_; int offset = 1; is_dense_multi_val_ = false; if (sum_sparse_rate < MultiValBin::multi_val_bin_sparse_threshold && is_multi_val_) { // use dense multi val bin offset = 0; is_dense_multi_val_ = true; } // use bin at zero to store most_freq_bin only when not using dense multi val bin num_total_bin_ = offset; // however, we should force to leave one bin, if dense multi val bin is the first bin // and its first feature has most freq bin > 0 if (group_id == 0 && num_feature_ > 0 && is_dense_multi_val_ && bin_mappers_[0]->GetMostFreqBin() > 0) { num_total_bin_ = 1; } bin_offsets_.emplace_back(num_total_bin_); for (int i = 0; i < num_feature_; ++i) { auto num_bin = bin_mappers_[i]->num_bin(); if (bin_mappers_[i]->GetMostFreqBin() == 0) { num_bin -= offset; } num_total_bin_ += num_bin; bin_offsets_.emplace_back(num_total_bin_); } CreateBinData(num_data, is_multi_val_, true, false); } FeatureGroup(const FeatureGroup& other, int num_data) { num_feature_ = other.num_feature_; is_multi_val_ = other.is_multi_val_; is_dense_multi_val_ = other.is_dense_multi_val_; is_sparse_ = other.is_sparse_; num_total_bin_ = other.num_total_bin_; bin_offsets_ = other.bin_offsets_; bin_mappers_.reserve(other.bin_mappers_.size()); for (auto& bin_mapper : other.bin_mappers_) { bin_mappers_.emplace_back(new BinMapper(*bin_mapper)); } CreateBinData(num_data, is_multi_val_, !is_sparse_, is_sparse_); } FeatureGroup(std::vector<std::unique_ptr<BinMapper>>* bin_mappers, data_size_t num_data) : num_feature_(1), is_multi_val_(false) { CHECK_EQ(static_cast<int>(bin_mappers->size()), 1); // use bin at zero to store default_bin num_total_bin_ = 1; is_dense_multi_val_ = false; bin_offsets_.emplace_back(num_total_bin_); auto& ref_bin_mappers = *bin_mappers; for (int i = 0; i < num_feature_; ++i) { bin_mappers_.emplace_back(ref_bin_mappers[i].release()); auto num_bin = bin_mappers_[i]->num_bin(); if (bin_mappers_[i]->GetMostFreqBin() == 0) { num_bin -= 1; } num_total_bin_ += num_bin; bin_offsets_.emplace_back(num_total_bin_); } CreateBinData(num_data, false, false, false); } /*! 
* \brief Constructor from memory * \param memory Pointer of memory * \param num_all_data Number of global data * \param local_used_indices Local used indices, empty means using all data */ FeatureGroup(const void* memory, data_size_t num_all_data, const std::vector<data_size_t>& local_used_indices, int group_id) { const char* memory_ptr = reinterpret_cast<const char*>(memory); // get is_sparse is_multi_val_ = *(reinterpret_cast<const bool*>(memory_ptr)); memory_ptr += VirtualFileWriter::AlignedSize(sizeof(is_multi_val_)); is_dense_multi_val_ = *(reinterpret_cast<const bool*>(memory_ptr)); memory_ptr += VirtualFileWriter::AlignedSize(sizeof(is_dense_multi_val_)); is_sparse_ = *(reinterpret_cast<const bool*>(memory_ptr)); memory_ptr += VirtualFileWriter::AlignedSize(sizeof(is_sparse_)); num_feature_ = *(reinterpret_cast<const int*>(memory_ptr)); memory_ptr += VirtualFileWriter::AlignedSize(sizeof(num_feature_)); // get bin mapper bin_mappers_.clear(); for (int i = 0; i < num_feature_; ++i) { bin_mappers_.emplace_back(new BinMapper(memory_ptr)); memory_ptr += bin_mappers_[i]->SizesInByte(); } bin_offsets_.clear(); int offset = 1; if (is_dense_multi_val_) { offset = 0; } // use bin at zero to store most_freq_bin only when not using dense multi val bin num_total_bin_ = offset; // however, we should force to leave one bin, if dense multi val bin is the first bin // and its first feature has most freq bin > 0 if (group_id == 0 && num_feature_ > 0 && is_dense_multi_val_ && bin_mappers_[0]->GetMostFreqBin() > 0) { num_total_bin_ = 1; } bin_offsets_.emplace_back(num_total_bin_); for (int i = 0; i < num_feature_; ++i) { auto num_bin = bin_mappers_[i]->num_bin(); if (bin_mappers_[i]->GetMostFreqBin() == 0) { num_bin -= offset; } num_total_bin_ += num_bin; bin_offsets_.emplace_back(num_total_bin_); } data_size_t num_data = num_all_data; if (!local_used_indices.empty()) { num_data = static_cast<data_size_t>(local_used_indices.size()); } if (is_multi_val_) { for (int i = 0; i < num_feature_; ++i) { int addi = bin_mappers_[i]->GetMostFreqBin() == 0 ? 0 : 1; if (bin_mappers_[i]->sparse_rate() >= kSparseThreshold) { multi_bin_data_.emplace_back(Bin::CreateSparseBin( num_data, bin_mappers_[i]->num_bin() + addi)); } else { multi_bin_data_.emplace_back( Bin::CreateDenseBin(num_data, bin_mappers_[i]->num_bin() + addi)); } multi_bin_data_.back()->LoadFromMemory(memory_ptr, local_used_indices); memory_ptr += multi_bin_data_.back()->SizesInByte(); } } else { if (is_sparse_) { bin_data_.reset(Bin::CreateSparseBin(num_data, num_total_bin_)); } else { bin_data_.reset(Bin::CreateDenseBin(num_data, num_total_bin_)); } // get bin data bin_data_->LoadFromMemory(memory_ptr, local_used_indices); } } /*! \brief Destructor */ ~FeatureGroup() {} /*! 
* \brief Push one record, will auto convert to bin and push to bin data * \param tid Thread id * \param idx Index of record * \param value feature value of record */ inline void PushData(int tid, int sub_feature_idx, data_size_t line_idx, double value) { uint32_t bin = bin_mappers_[sub_feature_idx]->ValueToBin(value); if (bin == bin_mappers_[sub_feature_idx]->GetMostFreqBin()) { return; } if (bin_mappers_[sub_feature_idx]->GetMostFreqBin() == 0) { bin -= 1; } if (is_multi_val_) { multi_bin_data_[sub_feature_idx]->Push(tid, line_idx, bin + 1); } else { bin += bin_offsets_[sub_feature_idx]; bin_data_->Push(tid, line_idx, bin); } } void ReSize(int num_data) { if (!is_multi_val_) { bin_data_->ReSize(num_data); } else { for (int i = 0; i < num_feature_; ++i) { multi_bin_data_[i]->ReSize(num_data); } } } inline void CopySubrow(const FeatureGroup* full_feature, const data_size_t* used_indices, data_size_t num_used_indices) { if (!is_multi_val_) { bin_data_->CopySubrow(full_feature->bin_data_.get(), used_indices, num_used_indices); } else { for (int i = 0; i < num_feature_; ++i) { multi_bin_data_[i]->CopySubrow(full_feature->multi_bin_data_[i].get(), used_indices, num_used_indices); } } } void AddFeaturesFrom(const FeatureGroup* other, int group_id) { CHECK(is_multi_val_); CHECK(other->is_multi_val_); // every time when new features are added, we need to reconsider sparse or dense double sum_sparse_rate = 0.0f; for (int i = 0; i < num_feature_; ++i) { sum_sparse_rate += bin_mappers_[i]->sparse_rate(); } for (int i = 0; i < other->num_feature_; ++i) { sum_sparse_rate += other->bin_mappers_[i]->sparse_rate(); } sum_sparse_rate /= (num_feature_ + other->num_feature_); int offset = 1; is_dense_multi_val_ = false; if (sum_sparse_rate < MultiValBin::multi_val_bin_sparse_threshold && is_multi_val_) { // use dense multi val bin offset = 0; is_dense_multi_val_ = true; } bin_offsets_.clear(); num_total_bin_ = offset; // however, we should force to leave one bin, if dense multi val bin is the first bin // and its first feature has most freq bin > 0 if (group_id == 0 && num_feature_ > 0 && is_dense_multi_val_ && bin_mappers_[0]->GetMostFreqBin() > 0) { num_total_bin_ = 1; } bin_offsets_.emplace_back(num_total_bin_); for (int i = 0; i < num_feature_; ++i) { auto num_bin = bin_mappers_[i]->num_bin(); if (bin_mappers_[i]->GetMostFreqBin() == 0) { num_bin -= offset; } num_total_bin_ += num_bin; bin_offsets_.emplace_back(num_total_bin_); } for (int i = 0; i < other->num_feature_; ++i) { const auto& other_bin_mapper = other->bin_mappers_[i]; bin_mappers_.emplace_back(new BinMapper(*other_bin_mapper)); auto num_bin = other_bin_mapper->num_bin(); if (other_bin_mapper->GetMostFreqBin() == 0) { num_bin -= offset; } num_total_bin_ += num_bin; bin_offsets_.emplace_back(num_total_bin_); multi_bin_data_.emplace_back(other->multi_bin_data_[i]->Clone()); } num_feature_ += other->num_feature_; } inline BinIterator* SubFeatureIterator(int sub_feature) { uint32_t most_freq_bin = bin_mappers_[sub_feature]->GetMostFreqBin(); if (!is_multi_val_) { uint32_t min_bin = bin_offsets_[sub_feature]; uint32_t max_bin = bin_offsets_[sub_feature + 1] - 1; return bin_data_->GetIterator(min_bin, max_bin, most_freq_bin); } else { int addi = bin_mappers_[sub_feature]->GetMostFreqBin() == 0 ? 
0 : 1; uint32_t min_bin = 1; uint32_t max_bin = bin_mappers_[sub_feature]->num_bin() - 1 + addi; return multi_bin_data_[sub_feature]->GetIterator(min_bin, max_bin, most_freq_bin); } } inline void FinishLoad() { if (is_multi_val_) { OMP_INIT_EX(); #pragma omp parallel for schedule(guided) for (int i = 0; i < num_feature_; ++i) { OMP_LOOP_EX_BEGIN(); multi_bin_data_[i]->FinishLoad(); OMP_LOOP_EX_END(); } OMP_THROW_EX(); } else { bin_data_->FinishLoad(); } } inline BinIterator* FeatureGroupIterator() { if (is_multi_val_) { return nullptr; } uint32_t min_bin = bin_offsets_[0]; uint32_t max_bin = bin_offsets_.back() - 1; uint32_t most_freq_bin = 0; return bin_data_->GetIterator(min_bin, max_bin, most_freq_bin); } inline size_t FeatureGroupSizesInByte() { return bin_data_->SizesInByte(); } inline void* FeatureGroupData() { if (is_multi_val_) { return nullptr; } return bin_data_->get_data(); } inline data_size_t Split(int sub_feature, const uint32_t* threshold, int num_threshold, bool default_left, const data_size_t* data_indices, data_size_t cnt, data_size_t* lte_indices, data_size_t* gt_indices) const { uint32_t default_bin = bin_mappers_[sub_feature]->GetDefaultBin(); uint32_t most_freq_bin = bin_mappers_[sub_feature]->GetMostFreqBin(); if (!is_multi_val_) { uint32_t min_bin = bin_offsets_[sub_feature]; uint32_t max_bin = bin_offsets_[sub_feature + 1] - 1; if (bin_mappers_[sub_feature]->bin_type() == BinType::NumericalBin) { auto missing_type = bin_mappers_[sub_feature]->missing_type(); if (num_feature_ == 1) { return bin_data_->Split(max_bin, default_bin, most_freq_bin, missing_type, default_left, *threshold, data_indices, cnt, lte_indices, gt_indices); } else { return bin_data_->Split(min_bin, max_bin, default_bin, most_freq_bin, missing_type, default_left, *threshold, data_indices, cnt, lte_indices, gt_indices); } } else { if (num_feature_ == 1) { return bin_data_->SplitCategorical(max_bin, most_freq_bin, threshold, num_threshold, data_indices, cnt, lte_indices, gt_indices); } else { return bin_data_->SplitCategorical( min_bin, max_bin, most_freq_bin, threshold, num_threshold, data_indices, cnt, lte_indices, gt_indices); } } } else { int addi = bin_mappers_[sub_feature]->GetMostFreqBin() == 0 ? 0 : 1; uint32_t max_bin = bin_mappers_[sub_feature]->num_bin() - 1 + addi; if (bin_mappers_[sub_feature]->bin_type() == BinType::NumericalBin) { auto missing_type = bin_mappers_[sub_feature]->missing_type(); return multi_bin_data_[sub_feature]->Split( max_bin, default_bin, most_freq_bin, missing_type, default_left, *threshold, data_indices, cnt, lte_indices, gt_indices); } else { return multi_bin_data_[sub_feature]->SplitCategorical( max_bin, most_freq_bin, threshold, num_threshold, data_indices, cnt, lte_indices, gt_indices); } } } /*! * \brief From bin to feature value * \param bin * \return FeatureGroup value of this bin */ inline double BinToValue(int sub_feature_idx, uint32_t bin) const { return bin_mappers_[sub_feature_idx]->BinToValue(bin); } /*! 
* \brief Save binary data to file * \param file File want to write */ void SaveBinaryToFile(const VirtualFileWriter* writer) const { writer->AlignedWrite(&is_multi_val_, sizeof(is_multi_val_)); writer->AlignedWrite(&is_dense_multi_val_, sizeof(is_dense_multi_val_)); writer->AlignedWrite(&is_sparse_, sizeof(is_sparse_)); writer->AlignedWrite(&num_feature_, sizeof(num_feature_)); for (int i = 0; i < num_feature_; ++i) { bin_mappers_[i]->SaveBinaryToFile(writer); } if (is_multi_val_) { for (int i = 0; i < num_feature_; ++i) { multi_bin_data_[i]->SaveBinaryToFile(writer); } } else { bin_data_->SaveBinaryToFile(writer); } } /*! * \brief Get sizes in byte of this object */ size_t SizesInByte() const { size_t ret = VirtualFileWriter::AlignedSize(sizeof(is_multi_val_)) + VirtualFileWriter::AlignedSize(sizeof(is_dense_multi_val_)) + VirtualFileWriter::AlignedSize(sizeof(is_sparse_)) + VirtualFileWriter::AlignedSize(sizeof(num_feature_)); for (int i = 0; i < num_feature_; ++i) { ret += bin_mappers_[i]->SizesInByte(); } if (!is_multi_val_) { ret += bin_data_->SizesInByte(); } else { for (int i = 0; i < num_feature_; ++i) { ret += multi_bin_data_[i]->SizesInByte(); } } return ret; } /*! \brief Disable copy */ FeatureGroup& operator=(const FeatureGroup&) = delete; /*! \brief Deep copy */ FeatureGroup(const FeatureGroup& other, bool should_handle_dense_mv, int group_id) { num_feature_ = other.num_feature_; is_multi_val_ = other.is_multi_val_; is_dense_multi_val_ = other.is_dense_multi_val_; is_sparse_ = other.is_sparse_; num_total_bin_ = other.num_total_bin_; bin_offsets_ = other.bin_offsets_; bin_mappers_.reserve(other.bin_mappers_.size()); for (auto& bin_mapper : other.bin_mappers_) { bin_mappers_.emplace_back(new BinMapper(*bin_mapper)); } if (!is_multi_val_) { bin_data_.reset(other.bin_data_->Clone()); } else { multi_bin_data_.clear(); for (int i = 0; i < num_feature_; ++i) { multi_bin_data_.emplace_back(other.multi_bin_data_[i]->Clone()); } } if (should_handle_dense_mv && is_dense_multi_val_ && group_id > 0) { // this feature group was the first feature group, but now no longer is, // so we need to eliminate its special empty bin for multi val dense bin if (bin_mappers_[0]->GetMostFreqBin() > 0 && bin_offsets_[0] == 1) { for (size_t i = 0; i < bin_offsets_.size(); ++i) { bin_offsets_[i] -= 1; } num_total_bin_ -= 1; } } } private: void CreateBinData(int num_data, bool is_multi_val, bool force_dense, bool force_sparse) { if (is_multi_val) { multi_bin_data_.clear(); for (int i = 0; i < num_feature_; ++i) { int addi = bin_mappers_[i]->GetMostFreqBin() == 0 ? 0 : 1; if (bin_mappers_[i]->sparse_rate() >= kSparseThreshold) { multi_bin_data_.emplace_back(Bin::CreateSparseBin( num_data, bin_mappers_[i]->num_bin() + addi)); } else { multi_bin_data_.emplace_back( Bin::CreateDenseBin(num_data, bin_mappers_[i]->num_bin() + addi)); } } is_multi_val_ = true; } else { if (force_sparse || (!force_dense && num_feature_ == 1 && bin_mappers_[0]->sparse_rate() >= kSparseThreshold)) { is_sparse_ = true; bin_data_.reset(Bin::CreateSparseBin(num_data, num_total_bin_)); } else { is_sparse_ = false; bin_data_.reset(Bin::CreateDenseBin(num_data, num_total_bin_)); } is_multi_val_ = false; } } /*! \brief Number of features */ int num_feature_; /*! \brief Bin mapper for sub features */ std::vector<std::unique_ptr<BinMapper>> bin_mappers_; /*! \brief Bin offsets for sub features */ std::vector<uint32_t> bin_offsets_; /*! 
\brief Bin data of this feature */ std::unique_ptr<Bin> bin_data_; std::vector<std::unique_ptr<Bin>> multi_bin_data_; /*! \brief True if this group stores each feature in its own multi-value bin */ bool is_multi_val_; /*! \brief True if the multi-value bin uses the dense layout */ bool is_dense_multi_val_; /*! \brief True if this feature group is sparse */ bool is_sparse_; /*! \brief Total number of bins in this group */ int num_total_bin_; }; } // namespace LightGBM #endif // LIGHTGBM_FEATURE_GROUP_H_
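// A minimal sketch of the bin-offset bookkeeping repeated in the constructors
// and AddFeaturesFrom above: bin 0 is shared for the most frequent bin unless
// the dense multi-value layout is used, and a feature whose most frequent bin
// is already 0 gives one bin back. compute_bin_offsets and its parameters are
// illustrative only, not part of the LightGBM API.
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<uint32_t> compute_bin_offsets(const std::vector<int>& num_bins,
                                          const std::vector<uint32_t>& most_freq_bins,
                                          bool dense_multi_val) {
  const int offset = dense_multi_val ? 0 : 1;     // reserve bin 0 unless dense multi-val
  int num_total_bin = offset;
  std::vector<uint32_t> bin_offsets;
  bin_offsets.push_back(static_cast<uint32_t>(num_total_bin));
  for (std::size_t i = 0; i < num_bins.size(); ++i) {
    int num_bin = num_bins[i];
    if (most_freq_bins[i] == 0) {
      num_bin -= offset;                          // its bin 0 folds into the shared slot
    }
    num_total_bin += num_bin;
    bin_offsets.push_back(static_cast<uint32_t>(num_total_bin));
  }
  return bin_offsets;
}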
for-18.c
/* { dg-do compile } */ /* { dg-options "-O -fopenmp -fdump-tree-ompexp" } */ /* LLVM LOCAL test not applicable */ /* { dg-require-fdump "" } */ void foo (int *a, int i) { int j, k = 1, l = 30, m = 4; #pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4) for (j = 0; j <= l; j++) a[j] = 1; #pragma omp parallel for num_threads (3 * i) schedule (dynamic, i * 4) for (j = k; j <= l; j += (m - 1)) a[j] = 2; #pragma omp parallel for num_threads (3 * i) schedule (dynamic, 4) for (j = 0; j <= l; j++) a[j] = 3; #pragma omp parallel for num_threads (3 * i) schedule (dynamic, 4) for (j = k; j <= l; j += (m - 1)) a[j] = 4; } void bar (int *a, int i) { int j, k = 1, l = 30, m = 4; #pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4) for (j = 0; j <= l; j++) a[j] = 1; #pragma omp parallel for num_threads (3 * i) schedule (guided, i * 4) for (j = k; j <= l; j += (m - 1)) a[j] = 2; #pragma omp parallel for num_threads (3 * i) schedule (guided, 4) for (j = 0; j <= l; j++) a[j] = 3; #pragma omp parallel for num_threads (3 * i) schedule (guided, 4) for (j = k; j <= l; j += (m - 1)) a[j] = 4; } /* { dg-final { scan-tree-dump-times "GOMP_parallel_loop_dynamic_start" 4 "ompexp" { xfail *-*-* } } } */ /* { dg-final { scan-tree-dump-times "GOMP_parallel_loop_guided_start" 4 "ompexp" { xfail *-*-* } } } */ /* { dg-final { cleanup-tree-dump "ompexp" } } */
GB_binop__bxor_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxor_int32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__bxor_int32) // A.*B function (eWiseMult): GB (_AemultB_03__bxor_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_int32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((node)) // C+=B function (dense accum): GB (_Cdense_accumB__bxor_int32) // C+=b function (dense accum): GB (_Cdense_accumb__bxor_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_int32) // C=scalar+B GB (_bind1st__bxor_int32) // C=scalar+B' GB (_bind1st_tran__bxor_int32) // C=A+scalar GB (_bind2nd__bxor_int32) // C=A'+scalar GB (_bind2nd_tran__bxor_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij) ^ (bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x) ^ (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXOR || GxB_NO_INT32 || GxB_NO_BXOR_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bxor_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bxor_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bxor_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((node)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bxor_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bxor_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bxor_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__bxor_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bxor_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bxor_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = (x) ^ (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bxor_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = (aij) ^ (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (x) ^ (aij) ; \ } GrB_Info GB (_bind1st_tran__bxor_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij) ^ (y) ; \ } GrB_Info GB (_bind2nd_tran__bxor_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
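/* A standalone sketch of the bind1st kernel above for BXOR_INT32: the scalar
   is bound to the first argument and entries whose bitmap slot is zero are
   skipped. The explicit Bb == NULL test stands in for the GBB macro (a NULL
   bitmap is taken to mean all entries are present); the function name is
   illustrative only. */
#include <stddef.h>
#include <stdint.h>

static void bxor_int32_bind1st(int32_t *Cx, int32_t x, const int32_t *Bx,
                               const int8_t *Bb, int64_t anz, int nthreads) {
  int64_t p;
  #pragma omp parallel for num_threads(nthreads) schedule(static)
  for (p = 0; p < anz; p++) {
    if (Bb != NULL && !Bb[p]) continue;   /* entry not present in bitmap form */
    Cx[p] = x ^ Bx[p];                    /* GB_BINOP: z = (x) ^ (y) */
  }
}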
GB_unaryop__abs_int16_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int16_int32 // op(A') function: GB_tran__abs_int16_int32 // C type: int16_t // A type: int32_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ int32_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, aij) \ int16_t z = (int16_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int16_int32 ( int16_t *Cx, // Cx and Ax may be aliased int32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int16_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
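/* A standalone sketch of the typecast-then-apply pattern in the kernel above:
   each int32_t entry is first cast to the output type int16_t and the
   absolute value is then taken in that type (GB_IABS is assumed to be the
   integer abs macro). The function name is illustrative only. */
#include <stdint.h>

static void abs_int16_from_int32(int16_t *Cx, const int32_t *Ax,
                                 int64_t anz, int nthreads) {
  int64_t p;
  #pragma omp parallel for num_threads(nthreads) schedule(static)
  for (p = 0; p < anz; p++) {
    int16_t z = (int16_t) Ax[p];          /* GB_CASTING: int16_t z = (int16_t) aij */
    Cx[p] = (int16_t) (z < 0 ? -z : z);   /* GB_OP: z = GB_IABS (x) */
  }
}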
QuadNode.h
/* * QuadNode.h * * Created on: 21.05.2014 * Author: Moritz v. Looz ([email protected]) */ #ifndef QUADNODE_H_ #define QUADNODE_H_ #include <vector> #include <algorithm> #include <functional> #include <assert.h> #include "../../auxiliary/Log.h" #include "../../auxiliary/Parallel.h" #include "../../geometric/HyperbolicSpace.h" using std::vector; using std::min; using std::max; using std::cos; namespace NetworKit { template <class T, bool poincare = true> class QuadNode { friend class QuadTreeGTest; private: double leftAngle; double minR; double rightAngle; double maxR; Point2D<double> a,b,c,d; unsigned capacity; static const unsigned coarsenLimit = 4; count subTreeSize; std::vector<T> content; std::vector<Point2D<double> > positions; std::vector<double> angles; std::vector<double> radii; bool isLeaf; bool splitTheoretical; double alpha; double balance; index ID; double lowerBoundR; public: std::vector<QuadNode> children; QuadNode() { //This should never be called. leftAngle = 0; rightAngle = 0; minR = 0; maxR = 0; capacity = 20; isLeaf = true; subTreeSize = 0; balance = 0.5; splitTheoretical = false; alpha = 1; lowerBoundR = maxR; ID = 0; } /** * Construct a QuadNode for polar coordinates. * * * @param leftAngle Minimal angular coordinate of region, in radians from 0 to 2\pi * @param minR Minimal radial coordinate of region, between 0 and 1 * @param rightAngle Maximal angular coordinate of region, in radians from 0 to 2\pi * @param maxR Maximal radial coordinate of region, between 0 and 1 * @param capacity Number of points a leaf cell can store before splitting * @param minDiameter Minimal diameter of a quadtree node. If the node is already smaller, don't split even if over capacity. Default is 0 * @param splitTheoretical Whether to split in a theoretically optimal way or in a way to decrease measured running times * @param alpha dispersion Parameter of the point distribution. Only has an effect if theoretical split is true * @param diagnostics Count how many necessary and unnecessary comparisons happen in leaf cells? Will cause race condition and false sharing in parallel use * */ QuadNode(double leftAngle, double minR, double rightAngle, double maxR, unsigned capacity = 1000, bool splitTheoretical = false, double alpha = 1, double balance = 0.5) { if (balance <= 0 || balance >= 1) throw std::runtime_error("Quadtree balance parameter must be between 0 and 1."); if (poincare && maxR > 1) throw std::runtime_error("The Poincare disk has a radius of 1, cannot create quadtree larger than that!"); this->leftAngle = leftAngle; this->minR = minR; this->maxR = maxR; this->rightAngle = rightAngle; this->a = HyperbolicSpace::polarToCartesian(leftAngle, minR); this->b = HyperbolicSpace::polarToCartesian(rightAngle, minR); this->c = HyperbolicSpace::polarToCartesian(rightAngle, maxR); this->d = HyperbolicSpace::polarToCartesian(leftAngle, maxR); this->capacity = capacity; this->alpha = alpha; this->splitTheoretical = splitTheoretical; this->balance = balance; this->lowerBoundR = maxR; this->ID = 0; isLeaf = true; subTreeSize = 0; } void split() { assert(isLeaf); //heavy lifting: split up! 
double middleAngle = (rightAngle - leftAngle) / 2 + leftAngle; /** * we want to make sure the space is evenly divided to obtain a balanced tree * Simply halving the radius will cause a larger space for the outer Quadnode, resulting in an unbalanced tree */ double middleR; if (poincare) { if (splitTheoretical) { double hyperbolicOuter = HyperbolicSpace::EuclideanRadiusToHyperbolic(maxR); double hyperbolicInner = HyperbolicSpace::EuclideanRadiusToHyperbolic(minR); double hyperbolicMiddle = acosh((1-balance)*cosh(alpha*hyperbolicOuter) + balance*cosh(alpha*hyperbolicInner))/alpha; middleR = HyperbolicSpace::hyperbolicRadiusToEuclidean(hyperbolicMiddle); } else { double nom = maxR - minR; double denom = pow((1-maxR*maxR)/(1-minR*minR), 0.5)+1; middleR = nom/denom + minR; } } else { middleR = acosh((1-balance)*cosh(alpha*maxR) + balance*cosh(alpha*minR))/alpha; } //one could also use the median here. Results in worse asymptotical complexity, but maybe better runtime? assert(middleR < maxR); assert(middleR > minR); QuadNode<index,poincare> southwest(leftAngle, minR, middleAngle, middleR, capacity, splitTheoretical, alpha, balance); QuadNode<index,poincare> southeast(middleAngle, minR, rightAngle, middleR, capacity, splitTheoretical, alpha, balance); QuadNode<index,poincare> northwest(leftAngle, middleR, middleAngle, maxR, capacity, splitTheoretical, alpha, balance); QuadNode<index,poincare> northeast(middleAngle, middleR, rightAngle, maxR, capacity, splitTheoretical, alpha, balance); children = {southwest, southeast, northwest, northeast}; isLeaf = false; } /** * Add a point at polar coordinates (angle, R) with content input. May split node if capacity is full * * @param input arbitrary content, in our case an index * @param angle angular coordinate of point, between 0 and 2 pi. * @param R radial coordinate of point, between 0 and 1. */ void addContent(T input, double angle, double R) { assert(this->responsible(angle, R)); if (lowerBoundR > R) lowerBoundR = R; if (isLeaf) { if (content.size() + 1 < capacity) { content.push_back(input); angles.push_back(angle); radii.push_back(R); Point2D<double> pos = HyperbolicSpace::polarToCartesian(angle, R); positions.push_back(pos); } else { split(); for (index i = 0; i < content.size(); i++) { this->addContent(content[i], angles[i], radii[i]); } assert(subTreeSize == content.size());//we have added everything twice subTreeSize = content.size(); content.clear(); angles.clear(); radii.clear(); positions.clear(); this->addContent(input, angle, R); } } else { assert(children.size() > 0); for (index i = 0; i < children.size(); i++) { if (children[i].responsible(angle, R)) { children[i].addContent(input, angle, R); break; } } subTreeSize++; } } /** * Remove content at polar coordinates (angle, R). 
May cause coarsening of the quadtree * * @param input Content to be removed * @param angle Angular coordinate * @param R Radial coordinate * * @return True if content was found and removed, false otherwise */ bool removeContent(T input, double angle, double R) { if (!responsible(angle, R)) return false; if (isLeaf) { index i = 0; for (; i < content.size(); i++) { if (content[i] == input) break; } if (i < content.size()) { assert(angles[i] == angle); assert(radii[i] == R); //remove element content.erase(content.begin()+i); positions.erase(positions.begin()+i); angles.erase(angles.begin()+i); radii.erase(radii.begin()+i); return true; } else { return false; } } else { bool removed = false; bool allLeaves = true; assert(children.size() > 0); for (index i = 0; i < children.size(); i++) { if (!children[i].isLeaf) allLeaves = false; if (children[i].removeContent(input, angle, R)) { assert(!removed); removed = true; } } if (removed) subTreeSize--; //coarsen? if (removed && allLeaves && size() < coarsenLimit) { //coarsen!! //why not assert empty containers and then insert directly? vector<T> allContent; vector<Point2D<double> > allPositions; vector<double> allAngles; vector<double> allRadii; for (index i = 0; i < children.size(); i++) { allContent.insert(allContent.end(), children[i].content.begin(), children[i].content.end()); allPositions.insert(allPositions.end(), children[i].positions.begin(), children[i].positions.end()); allAngles.insert(allAngles.end(), children[i].angles.begin(), children[i].angles.end()); allRadii.insert(allRadii.end(), children[i].radii.begin(), children[i].radii.end()); } assert(subTreeSize == allContent.size()); assert(subTreeSize == allPositions.size()); assert(subTreeSize == allAngles.size()); assert(subTreeSize == allRadii.size()); children.clear(); content.swap(allContent); positions.swap(allPositions); angles.swap(allAngles); radii.swap(allRadii); isLeaf = true; } return removed; } } /** * Check whether the region managed by this node lies outside of an Euclidean circle. 
* * @param query Center of the Euclidean query circle, given in Cartesian coordinates * @param radius Radius of the Euclidean query circle * * @return True if the region managed by this node lies completely outside of the circle */ bool outOfReach(Point2D<double> query, double radius) const { double phi, r; HyperbolicSpace::cartesianToPolar(query, phi, r); if (responsible(phi, r)) return false; //if using native coordinates, call distance calculation if (!poincare) return hyperbolicDistances(phi, r).first > radius; //get four edge points double topDistance, bottomDistance, leftDistance, rightDistance; if (phi < leftAngle || phi > rightAngle) { topDistance = min(c.distance(query), d.distance(query)); } else { topDistance = abs(r - maxR); } if (topDistance <= radius) return false; if (phi < leftAngle || phi > rightAngle) { bottomDistance = min(a.distance(query), b.distance(query)); } else { bottomDistance = abs(r - minR); } if (bottomDistance <= radius) return false; double minDistanceR = r*cos(abs(phi-leftAngle)); if (minDistanceR > minR && minDistanceR < maxR) { leftDistance = query.distance(HyperbolicSpace::polarToCartesian(phi, minDistanceR)); } else { leftDistance = min(a.distance(query), d.distance(query)); } if (leftDistance <= radius) return false; minDistanceR = r*cos(abs(phi-rightAngle)); if (minDistanceR > minR && minDistanceR < maxR) { rightDistance = query.distance(HyperbolicSpace::polarToCartesian(phi, minDistanceR)); } else { rightDistance = min(b.distance(query), c.distance(query)); } if (rightDistance <= radius) return false; return true; } /** * Check whether the region managed by this node lies outside of an Euclidean circle. * Functionality is the same as in the method above, but it takes polar coordinates instead of Cartesian ones * * @param angle_c Angular coordinate of the Euclidean query circle's center * @param r_c Radial coordinate of the Euclidean query circle's center * @param radius Radius of the Euclidean query circle * * @return True if the region managed by this node lies completely outside of the circle */ bool outOfReach(double angle_c, double r_c, double radius) const { if (responsible(angle_c, r_c)) return false; Point2D<double> query = HyperbolicSpace::polarToCartesian(angle_c, r_c); return outOfReach(query, radius); } /** * @param phi Angular coordinate of query point * @param r_h radial coordinate of query point in poincare disk */ std::pair<double, double> hyperbolicDistances(double phi, double r) const { double minRHyper, maxRHyper, r_h; if (poincare) { minRHyper=HyperbolicSpace::EuclideanRadiusToHyperbolic(this->minR); maxRHyper=HyperbolicSpace::EuclideanRadiusToHyperbolic(this->maxR); r_h = HyperbolicSpace::EuclideanRadiusToHyperbolic(r); } else { minRHyper=this->minR; maxRHyper=this->maxR; r_h = r; } double coshr = cosh(r_h); double sinhr = sinh(r_h); double coshMinR = cosh(minRHyper); double coshMaxR = cosh(maxRHyper); double sinhMinR = sinh(minRHyper); double sinhMaxR = sinh(maxRHyper); double cosDiffLeft = cos(phi - leftAngle); double cosDiffRight = cos(phi - rightAngle); /** * If the query point is not within the quadnode, the distance minimum is on the border. 
* Need to check whether extremum is between corners: */ double coshMinDistance, coshMaxDistance; //Left border double lowerLeftDistance = coshMinR*coshr-sinhMinR*sinhr*cosDiffLeft; double upperLeftDistance = coshMaxR*coshr-sinhMaxR*sinhr*cosDiffLeft; if (responsible(phi, r)) coshMinDistance = 1; //strictly speaking, this is wrong else coshMinDistance = min(lowerLeftDistance, upperLeftDistance); coshMaxDistance = max(lowerLeftDistance, upperLeftDistance); //double a = cosh(r_h); double b = sinhr*cosDiffLeft; double extremum = log((coshr+b)/(coshr-b))/2; if (extremum < maxRHyper && extremum >= minRHyper) { double extremeDistance = cosh(extremum)*coshr-sinh(extremum)*sinhr*cosDiffLeft; coshMinDistance = min(coshMinDistance, extremeDistance); coshMaxDistance = max(coshMaxDistance, extremeDistance); } /** * cosh is a function from [0,\infty) to [1, \infty) * Variables thus need */ assert(coshMaxDistance >= 1); assert(coshMinDistance >= 1); //Right border double lowerRightDistance = coshMinR*coshr-sinhMinR*sinhr*cosDiffRight; double upperRightDistance = coshMaxR*coshr-sinhMaxR*sinhr*cosDiffRight; coshMinDistance = min(coshMinDistance, lowerRightDistance); coshMinDistance = min(coshMinDistance, upperRightDistance); coshMaxDistance = max(coshMaxDistance, lowerRightDistance); coshMaxDistance = max(coshMaxDistance, upperRightDistance); b = sinhr*cosDiffRight; extremum = log((coshr+b)/(coshr-b))/2; if (extremum < maxRHyper && extremum >= minRHyper) { double extremeDistance = cosh(extremum)*coshr-sinh(extremum)*sinhr*cosDiffRight; coshMinDistance = min(coshMinDistance, extremeDistance); coshMaxDistance = max(coshMaxDistance, extremeDistance); } assert(coshMaxDistance >= 1); assert(coshMinDistance >= 1); //upper and lower borders if (phi >= leftAngle && phi < rightAngle) { double lower = cosh(abs(r_h-minRHyper)); double upper = cosh(abs(r_h-maxRHyper)); coshMinDistance = min(coshMinDistance, lower); coshMinDistance = min(coshMinDistance, upper); coshMaxDistance = max(coshMaxDistance, upper); coshMaxDistance = max(coshMaxDistance, lower); } assert(coshMaxDistance >= 1); assert(coshMinDistance >= 1); //again with mirrored phi double mirrorphi; if (phi >= PI) mirrorphi = phi - PI; else mirrorphi = phi + PI; if (mirrorphi >= leftAngle && mirrorphi < rightAngle) { double lower = coshMinR*coshr+sinhMinR*sinhr; double upper = coshMaxR*coshr+sinhMaxR*sinhr; coshMinDistance = min(coshMinDistance, lower); coshMinDistance = min(coshMinDistance, upper); coshMaxDistance = max(coshMaxDistance, upper); coshMaxDistance = max(coshMaxDistance, lower); } assert(coshMaxDistance >= 1); assert(coshMinDistance >= 1); double minDistance, maxDistance; minDistance = acosh(coshMinDistance); maxDistance = acosh(coshMaxDistance); assert(maxDistance >= 0); assert(minDistance >= 0); return std::pair<double, double>(minDistance, maxDistance); } /** * Does the point at (angle, r) fall inside the region managed by this QuadNode? 
* * @param angle Angular coordinate of input point * @param r Radial coordinate of input points * * @return True if input point lies within the region of this QuadNode */ bool responsible(double angle, double r) const { return (angle >= leftAngle && angle < rightAngle && r >= minR && r < maxR); } /** * Get all Elements in this QuadNode or a descendant of it * * @return vector of content type T */ std::vector<T> getElements() const { if (isLeaf) { return content; } else { assert(content.size() == 0); assert(angles.size() == 0); assert(radii.size() == 0); vector<T> result; for (index i = 0; i < children.size(); i++) { std::vector<T> subresult = children[i].getElements(); result.insert(result.end(), subresult.begin(), subresult.end()); } return result; } } void getCoordinates(vector<double> &anglesContainer, vector<double> &radiiContainer) const { assert(angles.size() == radii.size()); if (isLeaf) { anglesContainer.insert(anglesContainer.end(), angles.begin(), angles.end()); radiiContainer.insert(radiiContainer.end(), radii.begin(), radii.end()); } else { assert(content.size() == 0); assert(angles.size() == 0); assert(radii.size() == 0); for (index i = 0; i < children.size(); i++) { children[i].getCoordinates(anglesContainer, radiiContainer); } } } /** * Don't use this! * Code is still in here for a unit test. * * Get copy of the leaf cell responsible for a point at (angle, r). * Expensive because it copies the whole subtree, causes assertion failure if called with the wrong arguments * * @param angle Angular coordinate of point * @param r Radial coordinate of point * * @return Copy of leaf cell containing point, or dummy cell not responsible for point * */ QuadNode<T>& getAppropriateLeaf(double angle, double r) { assert(this->responsible(angle, r)); if (isLeaf) return *this;//will this return the reference to the subtree itself or to a copy? else { for (index i = 0; i < children.size(); i++) { bool foundResponsibleChild = false; if (children[i].responsible(angle, r)) { assert(foundResponsibleChild == false); foundResponsibleChild = true; return children[i].getAppropriateLeaf(angle, r); } } throw std::runtime_error("No responsible child found."); } } /** * Main query method, get points lying in a Euclidean circle around the center point. * Optional limits can be given to get a different result or to reduce unnecessary comparisons * * Elements are pushed onto a vector which is a required argument. 
This is done to reduce copying * * Safe to call in parallel if diagnostics are disabled * * @param center Center of the query circle * @param radius Radius of the query circle * @param result Reference to the vector where the results will be stored * @param minAngle Optional value for the minimum angular coordinate of the query region * @param maxAngle Optional value for the maximum angular coordinate of the query region * @param lowR Optional value for the minimum radial coordinate of the query region * @param highR Optional value for the maximum radial coordinate of the query region */ void getElementsInEuclideanCircle(Point2D<double> center, double radius, vector<T> &result, double minAngle=0, double maxAngle=2*PI, double lowR=0, double highR = 1) const { if (!poincare) throw std::runtime_error("Euclidean query circles not yet implemented for native hyperbolic coordinates."); if (minAngle >= rightAngle || maxAngle <= leftAngle || lowR >= maxR || highR < lowerBoundR) return; if (outOfReach(center, radius)) { return; } if (isLeaf) { const double rsq = radius*radius; const double queryX = center[0]; const double queryY = center[1]; const count cSize = content.size(); for (index i = 0; i < cSize; i++) { const double deltaX = positions[i].getX() - queryX; const double deltaY = positions[i].getY() - queryY; if (deltaX*deltaX + deltaY*deltaY < rsq) { result.push_back(content[i]); } } } else { for (index i = 0; i < children.size(); i++) { children[i].getElementsInEuclideanCircle(center, radius, result, minAngle, maxAngle, lowR, highR); } } } count getElementsProbabilistically(Point2D<double> euQuery, std::function<double(double)> prob, bool suppressLeft, vector<T> &result) const { double phi_q, r_q; HyperbolicSpace::cartesianToPolar(euQuery, phi_q, r_q); if (suppressLeft && phi_q > rightAngle) return 0; TRACE("Getting hyperbolic distances"); auto distancePair = hyperbolicDistances(phi_q, r_q); double probUB = prob(distancePair.first); double probLB = prob(distancePair.second); #ifndef NDEBUG assert(probLB <= probUB); #else ((void)(probLB)); #endif if (probUB > 0.5) probUB = 1;//if we are going to take every second element anyway, no use in calculating expensive jumps if (probUB == 0) return 0; //TODO: return whole if probLB == 1 double probdenom = std::log(1-probUB); if (probdenom == 0) { DEBUG(probUB, " not zero, but too small too process. Ignoring."); return 0; } TRACE("probUB: ", probUB, ", probdenom: ", probdenom); count expectedNeighbours = probUB*size(); count candidatesTested = 0; if (isLeaf) { const count lsize = content.size(); TRACE("Leaf of size ", lsize); for (index i = 0; i < lsize; i++) { //jump! if (probUB < 1) { double random = Aux::Random::real(); double delta = std::log(random) / probdenom; assert(delta == delta); assert(delta >= 0); i += delta; if (i >= lsize) break; TRACE("Jumped with delta ", delta, " arrived at ", i); } //see where we've arrived candidatesTested++; double distance; if (poincare) { distance = HyperbolicSpace::poincareMetric(positions[i], euQuery); } else { distance = HyperbolicSpace::nativeDistance(angles[i], radii[i], phi_q, r_q); } assert(distance >= distancePair.first); double q = prob(distance); q = q / probUB; //since the candidate was selected by the jumping process, we have to adjust the probabilities assert(q <= 1); assert(q >= 0); //accept? 
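				// Accepting with q = prob(distance)/probUB after landing here with probability probUB
				// through the jump gives the element its intended overall probability prob(distance).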
double acc = Aux::Random::real(); if (acc < q) { TRACE("Accepted node ", i, " with probability ", q, "."); result.push_back(content[i]); } } } else { if (expectedNeighbours < 1) {//select candidates directly instead of calling recursively TRACE("probUB = ", probUB, ", switching to direct candidate selection."); assert(probUB < 1); const count stsize = size(); for (index i = 0; i < stsize; i++) { double delta = std::log(Aux::Random::real()) / probdenom; assert(delta >= 0); i += delta; TRACE("Jumped with delta ", delta, " arrived at ", i, ". Calling maybeGetKthElement."); if (i < size()) maybeGetKthElement(probUB, euQuery, prob, i, result);//this could be optimized. As of now, the offset is subtracted separately for each point else break; candidatesTested++; } } else {//carry on as normal for (index i = 0; i < children.size(); i++) { TRACE("Recursively calling child ", i); candidatesTested += children[i].getElementsProbabilistically(euQuery, prob, suppressLeft, result); } } } //DEBUG("Expected at most ", expectedNeighbours, " neighbours, got ", result.size() - offset); return candidatesTested; } void maybeGetKthElement(double upperBound, Point2D<double> euQuery, std::function<double(double)> prob, index k, vector<T> &circleDenizens) const { TRACE("Maybe get element ", k, " with upper Bound ", upperBound); assert(k < size()); if (isLeaf) { double distance; if (poincare) { distance = HyperbolicSpace::poincareMetric(positions[k], euQuery); } else { double phi_q, r_q; HyperbolicSpace::cartesianToPolar(euQuery, phi_q, r_q); distance = HyperbolicSpace::nativeDistance(angles[k], radii[k], phi_q, r_q); } double acceptance = prob(distance)/upperBound; TRACE("Is leaf, accept with ", acceptance); if (Aux::Random::real() < acceptance) circleDenizens.push_back(content[k]); } else { TRACE("Call recursively."); index offset = 0; for (index i = 0; i < children.size(); i++) { count childsize = children[i].size(); if (k - offset < childsize) { children[i].maybeGetKthElement(upperBound, euQuery, prob, k - offset, circleDenizens); break; } offset += childsize; } } } /** * Shrink all vectors in this subtree to fit the content. * Call after quadtree construction is complete, causes better memory usage and cache efficiency */ void trim() { content.shrink_to_fit(); positions.shrink_to_fit(); angles.shrink_to_fit(); radii.shrink_to_fit(); if (!isLeaf) { for (index i = 0; i < children.size(); i++) { children[i].trim(); } } } /** * Number of points lying in the region managed by this QuadNode */ count size() const { return isLeaf ? 
content.size() : subTreeSize; } void recount() { subTreeSize = 0; for (index i = 0; i < children.size(); i++) { children[i].recount(); subTreeSize += children[i].size(); } } /** * Height of subtree hanging from this QuadNode */ count height() const { count result = 1;//if leaf node, the children loop will not execute for (auto child : children) result = std::max(result, child.height()+1); return result; } /** * Leaf cells in the subtree hanging from this QuadNode */ count countLeaves() const { if (isLeaf) return 1; count result = 0; for (index i = 0; i < children.size(); i++) { result += children[i].countLeaves(); } return result; } double getLeftAngle() const { return leftAngle; } double getRightAngle() const { return rightAngle; } double getMinR() const { return minR; } double getMaxR() const { return maxR; } index getID() const { return ID; } index indexSubtree(index nextID) { index result = nextID; assert(children.size() == 4 || children.size() == 0); for (index i = 0; i < children.size(); i++) { result = children[i].indexSubtree(result); } this->ID = result; return result+1; } index getCellID(double phi, double r) const { if (!responsible(phi, r)) return none; if (isLeaf) return getID(); else { for (index i = 0; i < children.size(); i++) { index childresult = children[i].getCellID(phi, r); if (childresult != none) return childresult; } throw std::runtime_error("No responsible child node found even though this node is responsible."); } } index getMaxIDInSubtree() const { if (isLeaf) return getID(); else { index result = -1; for (int i = 0; i < 4; i++) { result = std::max(children[i].getMaxIDInSubtree(), result); } return std::max(result, getID()); } } count reindex(count offset) { if (isLeaf) { #ifndef NETWORKIT_OMP2 #pragma omp task #endif { index p = offset; std::generate(content.begin(), content.end(), [&p](){return p++;}); } offset += size(); } else { for (int i = 0; i < 4; i++) { offset = children[i].reindex(offset); } } return offset; } }; } #endif /* QUADNODE_H_ */
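The jump in getElementsProbabilistically is ordinary geometric skip sampling: instead of flipping a coin for every stored point, the loop advances by log(u)/log(1 - probUB) positions (u uniform in (0,1)) and then re-weights the landed candidate by prob(distance)/probUB. A minimal standalone sketch of that skip step, with std::mt19937 standing in for Aux::Random and a constant inclusion probability p (a hypothetical helper, not part of the quadtree):

#include <cmath>
#include <cstddef>
#include <random>
#include <vector>

// Select each index in [0, n) independently with probability p (0 < p < 1)
// without testing every index, mirroring the leaf loop above.
std::vector<std::size_t> sampleIndices(std::size_t n, double p, std::mt19937 &gen) {
    std::vector<std::size_t> result;
    std::uniform_real_distribution<double> unif(0.0, 1.0);
    const double denom = std::log(1.0 - p); // negative, plays the role of probdenom
    for (std::size_t i = 0; i < n; i++) {
        // skip a geometrically distributed number of indices, then take the one we land on
        const double delta = std::log(unif(gen)) / denom;
        if (delta >= static_cast<double>(n)) break; // also guards against unif(gen) == 0
        i += static_cast<std::size_t>(delta);
        if (i >= n) break;
        result.push_back(i);
    }
    return result;
}

On average only about p*n random numbers are drawn instead of n, which is what makes the probabilistic neighbourhood query cheap when the acceptance probability is small.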
GB_unop__lnot_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__lnot_fp64_fp64) // op(A') function: GB (_unop_tran__lnot_fp64_fp64) // C type: double // A type: double // cast: double cij = aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = !(z != 0) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__lnot_fp64_fp64) ( double *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = !(z != 0) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = aij ; Cx [p] = !(z != 0) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__lnot_fp64_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
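For context, a hedged sketch of how a kernel like the one above is reached from the user-level API: applying the built-in LNOT operator to an FP64 matrix. The GrB_*/GxB_* names are the standard SuiteSparse:GraphBLAS identifiers; error handling is reduced to returning the first failure, and the dispatch claim in the comment follows the file's own header ("op(A) function: GB (_unop_apply__lnot_fp64_fp64)").

#include "GraphBLAS.h"

// C = lnot(A) for an FP64 matrix A; element-wise this is cij = !(aij != 0),
// the operation hard-coded in GB (_unop_apply__lnot_fp64_fp64) above.
GrB_Info apply_lnot_fp64 (GrB_Matrix *C, GrB_Matrix A)
{
    GrB_Index nrows, ncols ;
    GrB_Info info = GrB_Matrix_nrows (&nrows, A) ;
    if (info != GrB_SUCCESS) return (info) ;
    info = GrB_Matrix_ncols (&ncols, A) ;
    if (info != GrB_SUCCESS) return (info) ;
    info = GrB_Matrix_new (C, GrB_FP64, nrows, ncols) ;
    if (info != GrB_SUCCESS) return (info) ;
    // no mask, no accumulator, default descriptor
    return (GrB_Matrix_apply (*C, NULL, NULL, GxB_LNOT_FP64, A, NULL)) ;
}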
convolutiondepthwise_3x3_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available. // // Copyright (C) 2019 BUG1989. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void convdw3x3s1_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out = top_blob.channel(p); const signed char* kernel = (const signed char *)_kernel + p*9; int* outptr0 = out; int* outptr0n = outptr0 + outw; const signed char* img0 = bottom_blob.channel(p); const signed char* r0 = img0; const signed char* r1 = img0 + w; const signed char* r2 = img0 + w*2; const signed char* r3 = img0 + w*3; int i = 0; #if __ARM_NEON int8x16_t _k0123456789x = vld1q_s8(kernel); int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x)); int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x)); int16x4_t _k0123 = vget_low_s16(_k_s16); int16x4_t _k4567 = vget_high_s16(_k_s16); int16x4_t _k8xxx = vget_low_s16(_kn_s16); #endif // __ARM_NEON for (; i+1 < outh; i+=2) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v4.8b, v5.8b}, [%3] \n" "ld1 {v6.8b, v7.8b}, [%4] \n" "ld1 {v8.8b, v9.8b}, [%5] \n" "ld1 {v10.8b, v11.8b}, [%6] \n" "add %3, %3, #8 \n" "add %4, %4, #8 \n" "add %5, %5, #8 \n" "add %6, %6, #8 \n" "ext v12.8b, v4.8b, v5.8b, #1 \n" "ext v13.8b, v4.8b, v5.8b, #2 \n" "ext v14.8b, v6.8b, v7.8b, #1 \n" "ext v15.8b, v6.8b, v7.8b, #2 \n" "ext v16.8b, v8.8b, v9.8b, #1 \n" "ext v17.8b, v8.8b, v9.8b, #2 \n" "ext v18.8b, v10.8b, v11.8b, #1 \n" "ext v19.8b, v10.8b, v11.8b, #2 \n" "sshll v4.8h, v4.8b, #0 \n"// r00 "sshll v12.8h, v12.8b, #0 \n"// r01 "sshll v13.8h, v13.8b, #0 \n"// r02 "sshll v6.8h, v6.8b, #0 \n"// r10 "sshll v14.8h, v14.8b, #0 \n"// r11 "sshll v15.8h, v15.8b, #0 \n"// r12 "sshll v8.8h, v8.8b, #0 \n"// r20 "sshll v16.8h, v16.8b, #0 \n"// r21 "sshll v17.8h, v17.8b, #0 \n"// r22 "sshll v10.8h, v10.8b, #0 \n"// r30 "sshll v18.8h, v18.8b, #0 \n"// r31 "sshll v19.8h, v19.8b, #0 \n"// r32 // r0 "smull v20.4s, v4.4h, %14.h[0] \n"// (r00 - r07) * k00 "smull2 v21.4s, v4.8h, %14.h[0] \n" "smull v22.4s, v12.4h, %14.h[1] \n"// (r01 - r08) * k01 "smull2 v23.4s, v12.8h, %14.h[1] \n" "smull v24.4s, v13.4h, %14.h[2] \n"// (r02 - r09) * k02 "smull2 v25.4s, v13.8h, %14.h[2] \n" // r1 "smull v26.4s, v6.4h, %14.h[0] \n"// (r10 - r17) * k00 "smull2 v27.4s, v6.8h, %14.h[0] \n" "smull v28.4s, v14.4h, %14.h[1] \n"// (r11 - r18) * k01 "smull2 v29.4s, v14.8h, %14.h[1] \n" "smull v30.4s, v15.4h, %14.h[2] \n"// (r12 - r19) * k02 "smull2 v31.4s, v15.8h, %14.h[2] \n" "smlal v20.4s, v6.4h, %14.h[3] \n"// (r10 - r17) * k03 "smlal2 v21.4s, v6.8h, %14.h[3] \n" "smlal v22.4s, v14.4h, %15.h[0] \n"// (r11 - r18) * k04 "smlal2 v23.4s, v14.8h, %15.h[0] 
\n" "smlal v24.4s, v15.4h, %15.h[1] \n"// (r12 - r19) * k05 "smlal2 v25.4s, v15.8h, %15.h[1] \n" // r2 "smlal v26.4s, v8.4h, %14.h[3] \n"// (r20 - r27) * k03 "smlal2 v27.4s, v8.8h, %14.h[3] \n" "smlal v28.4s, v16.4h, %15.h[0] \n"// (r21 - r28) * k04 "smlal2 v29.4s, v16.8h, %15.h[0] \n" "smlal v30.4s, v17.4h, %15.h[1] \n"// (r22 - r29) * k05 "smlal2 v31.4s, v17.8h, %15.h[1] \n" "smlal v20.4s, v8.4h, %15.h[2] \n"// (r20 - r27) * k06 "smlal2 v21.4s, v8.8h, %15.h[2] \n" "smlal v22.4s, v16.4h, %15.h[3] \n"// (r21 - r28) * k07 "smlal2 v23.4s, v16.8h, %15.h[3] \n" "smlal v24.4s, v17.4h, %16.h[0] \n"// (r22 - r29) * k08 "smlal2 v25.4s, v17.8h, %16.h[0] \n" // r3 "smlal v26.4s, v10.4h, %15.h[2] \n"// (r30 - r37) * k06 "smlal2 v27.4s, v10.8h, %15.h[2] \n" "smlal v28.4s, v18.4h, %15.h[3] \n"// (r31 - r38) * k07 "smlal2 v29.4s, v18.8h, %15.h[3] \n" "smlal v30.4s, v19.4h, %16.h[0] \n"// (r32 - r39) * k08 "smlal2 v31.4s, v19.8h, %16.h[0] \n" // add and save "add v20.4s, v20.4s, v22.4s \n" "add v21.4s, v21.4s, v23.4s \n" "add v26.4s, v26.4s, v28.4s \n" "add v27.4s, v27.4s, v29.4s \n" "add v20.4s, v20.4s, v24.4s \n" "add v21.4s, v21.4s, v25.4s \n" "add v26.4s, v26.4s, v30.4s \n" "add v27.4s, v27.4s, v31.4s \n" "st1 {v20.4s, v21.4s}, [%1], #32 \n" "st1 {v26.4s, v27.4s}, [%2], #32 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr0n), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3) // %6 : "0"(nn), "1"(outptr0), "2"(outptr0n), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k8xxx) // %16 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } #else if (nn > 0) { asm volatile( "0: \n" // r0 "vld1.s8 {d8-d9}, [%3] \n"// r0 "add %3, %3, #8 \n" "vext.s8 d10, d8, d9, #1 \n" "vext.s8 d12, d8, d9, #2 \n" "vmovl.s8 q4, d8 \n"// r00 "vmovl.s8 q5, d10 \n"// r01 "vmovl.s8 q6, d12 \n"// r02 // sum0 "vmull.s16 q7, d8, %P14[0] \n"// (r00 - r07) * k00 "vmull.s16 q8, d9, %P14[0] \n" "vmull.s16 q9, d10, %P14[1] \n"// (r01 - r08) * k01 "vmull.s16 q10, d11, %P14[1] \n" "vmlal.s16 q7, d12, %P14[2] \n"// (r02 - r09) * k02 "vmlal.s16 q8, d13, %P14[2] \n" // r1 "vld1.s8 {d8-d9}, [%4] \n"// r1 "add %4, %4, #8 \n" "vext.s8 d10, d8, d9, #1 \n" "vext.s8 d12, d8, d9, #2 \n" "vmovl.s8 q4, d8 \n"// r10 "vmovl.s8 q5, d10 \n"// r11 "vmovl.s8 q6, d12 \n"// r12 // sum0 "vmlal.s16 q7, d8, %P14[3] \n"// (r10 - r17) * k03 "vmlal.s16 q8, d9, %P14[3] \n" "vmlal.s16 q9, d10, %P15[0] \n"// (r11 - r18) * k04 "vmlal.s16 q10, d11, %P15[0] \n" "vmlal.s16 q7, d12, %P15[1] \n"// (r12 - r19) * k05 "vmlal.s16 q8, d13, %P15[1] \n" // sum1 "vmull.s16 q11, d8, %P14[0] \n"// (r10 - r17) * k00 "vmull.s16 q12, d9, %P14[0] \n" "vmull.s16 q13, d10, %P14[1] \n"// (r11 - r18) * k01 "vmull.s16 q14, d11, %P14[1] \n" "vmlal.s16 q11, d12, %P14[2] \n"// (r12 - r19) * k02 "vmlal.s16 q12, d13, %P14[2] \n" // r2 "vld1.s8 {d8-d9}, [%5] \n"// r2 "add %5, %5, #8 \n" "vext.s8 d10, d8, d9, #1 \n" "vext.s8 d12, d8, d9, #2 \n" "vmovl.s8 q4, d8 \n"// r20 "vmovl.s8 q5, d10 \n"// r21 "vmovl.s8 q6, d12 \n"// r22 // sum0 "vmlal.s16 q7, d8, %P15[2] \n"// (r20 - r27) * k06 "vmlal.s16 q8, d9, %P15[2] \n" "vmlal.s16 q9, d10, %P15[3] \n"// (r21 - r28) * k07 "vmlal.s16 q10, d11, %P15[3] \n" "vmlal.s16 q7, d12, %P16[0] \n"// (r22 - r29) * k08 "vmlal.s16 q8, d13, %P16[0] \n" // sum1 "vmlal.s16 q11, d8, %P14[3] \n"// (r20 - r27) * k03 "vmlal.s16 q12, d9, 
%P14[3] \n" "vmlal.s16 q13, d10, %P15[0] \n"// (r21 - r28) * k04 "vmlal.s16 q14, d11, %P15[0] \n" "vmlal.s16 q11, d12, %P15[1] \n"// (r22 - r29) * k05 "vmlal.s16 q12, d13, %P15[1] \n" // r3 "vld1.s8 {d8-d9}, [%6] \n"// r3 "add %6, %6, #8 \n" "vext.s8 d10, d8, d9, #1 \n" "vext.s8 d12, d8, d9, #2 \n" "vmovl.s8 q4, d8 \n"// r30 "vmovl.s8 q5, d10 \n"// r31 "vmovl.s8 q6, d12 \n"// r32 // sum1 "vmlal.s16 q11, d8, %P15[2] \n"// (r30 - r37) * k06 "vmlal.s16 q12, d9, %P15[2] \n" "vmlal.s16 q13, d10, %P15[3] \n"// (r31 - r38) * k07 "vmlal.s16 q14, d11, %P15[3] \n" "vmlal.s16 q11, d12, %P16[0] \n"// (r32 - r39) * k08 "vmlal.s16 q12, d13, %P16[0] \n" "subs %0, %0, #1 \n" // add and save "vadd.s32 q7, q7, q9 \n" "vadd.s32 q8, q8, q10 \n" "vadd.s32 q11, q11, q13 \n" "vadd.s32 q12, q12, q14 \n" "vst1.s32 {d14-d17}, [%1]! \n" "vst1.s32 {d22-d25}, [%2]! \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr0n), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3) // %6 : "0"(nn), "1"(outptr0), "2"(outptr0n), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k8xxx) // %16 : "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { // TODO NEON int sum0 = 0; int sum0n = 0; sum0 += (int)r0[0] * kernel[0]; sum0 += (int)r0[1] * kernel[1]; sum0 += (int)r0[2] * kernel[2]; sum0 += (int)r1[0] * kernel[3]; sum0 += (int)r1[1] * kernel[4]; sum0 += (int)r1[2] * kernel[5]; sum0 += (int)r2[0] * kernel[6]; sum0 += (int)r2[1] * kernel[7]; sum0 += (int)r2[2] * kernel[8]; sum0n += (int)r1[0] * kernel[0]; sum0n += (int)r1[1] * kernel[1]; sum0n += (int)r1[2] * kernel[2]; sum0n += (int)r2[0] * kernel[3]; sum0n += (int)r2[1] * kernel[4]; sum0n += (int)r2[2] * kernel[5]; sum0n += (int)r3[0] * kernel[6]; sum0n += (int)r3[1] * kernel[7]; sum0n += (int)r3[2] * kernel[8]; *outptr0 = sum0; *outptr0n = sum0n; r0++; r1++; r2++; r3++; outptr0++; outptr0n++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr0 += outw; outptr0n += outw; } for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld1 {v4.8b, v5.8b}, [%2] \n" "ld1 {v6.8b, v7.8b}, [%3] \n" "ld1 {v8.8b, v9.8b}, [%4] \n" "add %2, %2, #8 \n" "add %3, %3, #8 \n" "add %4, %4, #8 \n" "ext v12.8b, v4.8b, v5.8b, #1 \n" "ext v13.8b, v4.8b, v5.8b, #2 \n" "ext v14.8b, v6.8b, v7.8b, #1 \n" "ext v15.8b, v6.8b, v7.8b, #2 \n" "ext v16.8b, v8.8b, v9.8b, #1 \n" "ext v17.8b, v8.8b, v9.8b, #2 \n" "sshll v4.8h, v4.8b, #0 \n"// r00 "sshll v12.8h, v12.8b, #0 \n"// r01 "sshll v13.8h, v13.8b, #0 \n"// r02 "sshll v6.8h, v6.8b, #0 \n"// r10 "sshll v14.8h, v14.8b, #0 \n"// r11 "sshll v15.8h, v15.8b, #0 \n"// r12 "sshll v8.8h, v8.8b, #0 \n"// r20 "sshll v16.8h, v16.8b, #0 \n"// r21 "sshll v17.8h, v17.8b, #0 \n"// r22 // r0 "smull v20.4s, v4.4h, %10.h[0] \n"// (r00 - r07) * k00 "smull2 v21.4s, v4.8h, %10.h[0] \n" "smull v22.4s, v12.4h, %10.h[1] \n"// (r01 - r08) * k01 "smull2 v23.4s, v12.8h, %10.h[1] \n" "smull v24.4s, v13.4h, %10.h[2] \n"// (r02 - r09) * k02 "smull2 v25.4s, v13.8h, %10.h[2] \n" // r1 "smlal v20.4s, v6.4h, %10.h[3] \n"// (r10 - r17) * k03 "smlal2 v21.4s, v6.8h, %10.h[3] \n" "smlal v22.4s, v14.4h, %11.h[0] \n"// (r11 - r18) * k04 "smlal2 v23.4s, v14.8h, %11.h[0] \n" "smlal v24.4s, v15.4h, %11.h[1] \n"// (r12 - r19) * k05 "smlal2 v25.4s, v15.8h, %11.h[1] \n" // r2 "smlal v20.4s, 
v8.4h, %11.h[2] \n"// (r20 - r27) * k06 "smlal2 v21.4s, v8.8h, %11.h[2] \n" "smlal v22.4s, v16.4h, %11.h[3] \n"// (r21 - r28) * k07 "smlal2 v23.4s, v16.8h, %11.h[3] \n" "smlal v24.4s, v17.4h, %12.h[0] \n"// (r22 - r29) * k08 "smlal2 v25.4s, v17.8h, %12.h[0] \n" // add and save "add v20.4s, v20.4s, v22.4s \n" "add v21.4s, v21.4s, v23.4s \n" "add v20.4s, v20.4s, v24.4s \n" "add v21.4s, v21.4s, v25.4s \n" "st1 {v20.4s, v21.4s}, [%1], #32 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k4567), // %11 "w"(_k8xxx) // %12 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25" ); } #else if (nn > 0) { asm volatile( "0: \n" // r0 "vld1.s8 {d8-d9}, [%2] \n"// r0 "add %2, %2, #8 \n" "vext.s8 d10, d8, d9, #1 \n" "vext.s8 d12, d8, d9, #2 \n" "vmovl.s8 q4, d8 \n"// r00 "vmovl.s8 q5, d10 \n"// r01 "vmovl.s8 q6, d12 \n"// r02 // sum0 "vmull.s16 q7, d8, %P10[0] \n"// (r00 - r07) * k00 "vmull.s16 q8, d9, %P10[0] \n" "vmull.s16 q9, d10, %P10[1] \n"// (r01 - r08) * k01 "vmull.s16 q10, d11, %P10[1] \n" "vmlal.s16 q7, d12, %P10[2] \n"// (r02 - r09) * k02 "vmlal.s16 q8, d13, %P10[2] \n" // r1 "vld1.s8 {d8-d9}, [%3] \n"// r1 "add %3, %3, #8 \n" "vext.s8 d10, d8, d9, #1 \n" "vext.s8 d12, d8, d9, #2 \n" "vmovl.s8 q4, d8 \n"// r10 "vmovl.s8 q5, d10 \n"// r11 "vmovl.s8 q6, d12 \n"// r12 // sum0 "vmlal.s16 q7, d8, %P10[3] \n"// (r10 - r17) * k03 "vmlal.s16 q8, d9, %P10[3] \n" "vmlal.s16 q9, d10, %P11[0] \n"// (r11 - r18) * k04 "vmlal.s16 q10, d11, %P11[0] \n" "vmlal.s16 q7, d12, %P11[1] \n"// (r12 - r19) * k05 "vmlal.s16 q8, d13, %P11[1] \n" // r2 "vld1.s8 {d8-d9}, [%4] \n"// r2 "add %4, %4, #8 \n" "vext.s8 d10, d8, d9, #1 \n" "vext.s8 d12, d8, d9, #2 \n" "vmovl.s8 q4, d8 \n"// r20 "vmovl.s8 q5, d10 \n"// r21 "vmovl.s8 q6, d12 \n"// r22 // sum0 "vmlal.s16 q7, d8, %P11[2] \n"// (r20 - r27) * k06 "vmlal.s16 q8, d9, %P11[2] \n" "vmlal.s16 q9, d10, %P11[3] \n"// (r21 - r28) * k07 "vmlal.s16 q10, d11, %P11[3] \n" "vmlal.s16 q7, d12, %P12[0] \n"// (r22 - r29) * k08 "vmlal.s16 q8, d13, %P12[0] \n" "subs %0, %0, #1 \n" // add and save "vadd.s32 q7, q7, q9 \n" "vadd.s32 q8, q8, q10 \n" "vst1.s32 {d14-d17}, [%1]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k4567), // %11 "w"(_k8xxx) // %12 : "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { int sum = 0; sum += (int)r0[0] * kernel[0]; sum += (int)r0[1] * kernel[1]; sum += (int)r0[2] * kernel[2]; sum += (int)r1[0] * kernel[3]; sum += (int)r1[1] * kernel[4]; sum += (int)r1[2] * kernel[5]; sum += (int)r2[0] * kernel[6]; sum += (int)r2[1] * kernel[7]; sum += (int)r2[2] * kernel[8]; *outptr0 = sum; r0++; r1++; r2++; outptr0++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const signed char* kernel = (const signed char*)_kernel + p*9; int* outptr = out; const signed char* img = bottom_blob.channel(p); const signed char* r0 = img; const signed char* r1 = img + w; const signed char* r2 = img + w*2; int i = 0; #if __ARM_NEON int8x16_t _k0123456789x = vld1q_s8(kernel); int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x)); int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x)); int16x4_t _k0123 = vget_low_s16(_k_s16); int16x4_t _k4567 = vget_high_s16(_k_s16); int16x4_t _k8xxx = vget_low_s16(_kn_s16); #endif // __ARM_NEON for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 3; int remain = outw & 7; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "ld2 {v4.8b, v5.8b}, [%2], #16 \n" "ld2 {v6.8b, v7.8b}, [%2] \n" "ld2 {v8.8b, v9.8b}, [%3], #16 \n" "ld2 {v10.8b, v11.8b}, [%3] \n" "ld2 {v12.8b, v13.8b}, [%4], #16 \n" "ld2 {v14.8b, v15.8b}, [%4] \n" "ext v6.8b, v4.8b, v6.8b, #1 \n" "ext v10.8b, v8.8b, v10.8b, #1 \n" "ext v14.8b, v12.8b, v14.8b, #1 \n" "sshll v4.8h, v4.8b, #0 \n"// r00 "sshll v5.8h, v5.8b, #0 \n"// r01 "sshll v6.8h, v6.8b, #0 \n"// r02 "sshll v8.8h, v8.8b, #0 \n"// r10 "sshll v9.8h, v9.8b, #0 \n"// r11 "sshll v10.8h, v10.8b, #0 \n"// r12 "sshll v12.8h, v12.8b, #0 \n"// r20 "sshll v13.8h, v13.8b, #0 \n"// r21 "sshll v14.8h, v14.8b, #0 \n"// r22 // r0 "smull v20.4s, v4.4h, %10.h[0] \n"// (r00 - r07) * k00 "smull2 v21.4s, v4.8h, %10.h[0] \n" "smull v22.4s, v5.4h, %10.h[1] \n"// (r01 - r08) * k01 "smull2 v23.4s, v5.8h, %10.h[1] \n" "smull v24.4s, v6.4h, %10.h[2] \n"// (r02 - r09) * k02 "smull2 v25.4s, v6.8h, %10.h[2] \n" // r1 "smlal v20.4s, v8.4h, %10.h[3] \n"// (r10 - r17) * k03 "smlal2 v21.4s, v8.8h, %10.h[3] \n" "smlal v22.4s, v9.4h, %11.h[0] \n"// (r11 - r18) * k04 "smlal2 v23.4s, v9.8h, %11.h[0] \n" "smlal v24.4s, v10.4h, %11.h[1] \n"// (r12 - r19) * k05 "smlal2 v25.4s, v10.8h, %11.h[1] \n" // r2 "smlal v20.4s, v12.4h, %11.h[2] \n"// (r20 - r27) * k06 "smlal2 v21.4s, v12.8h, %11.h[2] \n" "smlal v22.4s, v13.4h, %11.h[3] \n"// (r21 - r28) * k07 "smlal2 v23.4s, v13.8h, %11.h[3] \n" "smlal v24.4s, v14.4h, %12.h[0] \n"// (r22 - r29) * k08 "smlal2 v25.4s, v14.8h, %12.h[0] \n" // add and save "add v20.4s, v20.4s, v22.4s \n" "add v21.4s, v21.4s, v23.4s \n" "add v20.4s, v20.4s, v24.4s \n" "add v21.4s, v21.4s, v25.4s \n" "st1 {v20.4s, v21.4s}, [%1], #32 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : 
"=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k4567), // %11 "w"(_k8xxx) // %12 : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25" ); } #else if (nn > 0) { asm volatile( "0: \n" // r0 "vld2.s8 {d8-d9}, [%2]! \n"// r0 "vld2.s8 {d10-d11}, [%2] \n" "vext.s8 d12, d8, d10, #1 \n" "vmovl.s8 q5, d9 \n"// r01 "vmovl.s8 q4, d8 \n"// r00 "vmovl.s8 q6, d12 \n"// r02 // sum0 "vmull.s16 q7, d8, %P10[0] \n"// (r00 - r07) * k00 "vmull.s16 q8, d9, %P10[0] \n" "vmull.s16 q9, d10, %P10[1] \n"// (r01 - r08) * k01 "vmull.s16 q10, d11, %P10[1] \n" "vmlal.s16 q7, d12, %P10[2] \n"// (r02 - r09) * k02 "vmlal.s16 q8, d13, %P10[2] \n" // r1 "vld2.s8 {d8-d9}, [%3]! \n"// r1 "vld2.s8 {d10-d11}, [%3] \n" "vext.s8 d12, d8, d10, #1 \n" "vmovl.s8 q5, d9 \n"// r11 "vmovl.s8 q4, d8 \n"// r10 "vmovl.s8 q6, d12 \n"// r12 // sum0 "vmlal.s16 q7, d8, %P10[3] \n"// (r10 - r17) * k03 "vmlal.s16 q8, d9, %P10[3] \n" "vmlal.s16 q9, d10, %P11[0] \n"// (r11 - r18) * k04 "vmlal.s16 q10, d11, %P11[0] \n" "vmlal.s16 q7, d12, %P11[1] \n"// (r12 - r19) * k05 "vmlal.s16 q8, d13, %P11[1] \n" // r2 "vld2.s8 {d8-d9}, [%4]! \n"// r2 "vld2.s8 {d10-d11}, [%4] \n" "vext.s8 d12, d8, d10, #1 \n" "vmovl.s8 q5, d9 \n"// r21 "vmovl.s8 q4, d8 \n"// r20 "vmovl.s8 q6, d12 \n"// r22 // sum0 "vmlal.s16 q7, d8, %P11[2] \n"// (r20 - r27) * k06 "vmlal.s16 q8, d9, %P11[2] \n" "vmlal.s16 q9, d10, %P11[3] \n"// (r21 - r28) * k07 "vmlal.s16 q10, d11, %P11[3] \n" "vmlal.s16 q7, d12, %P12[0] \n"// (r22 - r29) * k08 "vmlal.s16 q8, d13, %P12[0] \n" "subs %0, %0, #1 \n" // add and save "vadd.s32 q7, q7, q9 \n" "vadd.s32 q8, q8, q10 \n" "vst1.s32 {d14-d17}, [%1]! \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k4567), // %11 "w"(_k8xxx) // %12 : "cc", "memory", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain>0; remain--) { int sum = 0; sum += (int)r0[0] * kernel[0]; sum += (int)r0[1] * kernel[1]; sum += (int)r0[2] * kernel[2]; sum += (int)r1[0] * kernel[3]; sum += (int)r1[1] * kernel[4]; sum += (int)r1[2] * kernel[5]; sum += (int)r2[0] * kernel[6]; sum += (int)r2[1] * kernel[7]; sum += (int)r2[2] * kernel[8]; *outptr = sum; r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
computeGraph.c
#include "defs.h" double computeGraph(graph* G, graphSDG* SDGdata) { mcsim_skip_instrs_begin(); VERT_T* endV; LONG_T *degree, *numEdges, *pos, *pSums; WEIGHT_T* w; double elapsed_time; #ifdef _OPENMP omp_lock_t *vLock; LONG_T chunkSize; #endif elapsed_time = get_seconds(); #ifdef _OPENMP omp_set_num_threads(NUM_THREADS); #endif #ifdef _OPENMP #pragma omp parallel { #endif LONG_T i, j, u, n, m, tid, nthreads; #ifdef DIAGNOSTIC double elapsed_time_part; #endif #ifdef _OPENMP nthreads = omp_get_num_threads(); tid = omp_get_thread_num(); #else tid = 0; nthreads = 1; #endif n = N; m = M; if (tid == 0) { #ifdef _OPENMP vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t)); assert(vLock != NULL); chunkSize = n/nthreads; #endif pos = (LONG_T *) malloc(m*sizeof(LONG_T)); assert(pos != NULL); degree = (LONG_T *) calloc(n, sizeof(LONG_T)); assert(degree != NULL); } #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds(); } #endif #ifdef _OPENMP #pragma omp barrier #pragma omp for schedule(static, chunkSize) for (i=0; i<n; i++) { omp_init_lock(&vLock[i]); } #pragma omp barrier #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() - elapsed_time_part; fprintf(stderr, "Lock initialization time: %lf seconds\n", elapsed_time_part); elapsed_time_part = get_seconds(); } #endif #pragma omp for #endif for (i=0; i<m; i++) { u = SDGdata->startVertex[i]; #ifdef _OPENMP omp_set_lock(&vLock[u]); #endif pos[i] = degree[u]++; #ifdef _OPENMP omp_unset_lock(&vLock[u]); #endif } #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() - elapsed_time_part; fprintf(stderr, "Degree computation time: %lf seconds\n", elapsed_time_part); elapsed_time_part = get_seconds(); } #endif #ifdef _OPENMP #pragma omp barrier #pragma omp for schedule(static, chunkSize) for (i=0; i<n; i++) { omp_destroy_lock(&vLock[i]); } if (tid == 0) free(vLock); #endif #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() - elapsed_time_part; fprintf(stderr, "Lock destruction time: %lf seconds\n", elapsed_time_part); elapsed_time_part = get_seconds(); } #endif if (tid == 0) { numEdges = (LONG_T *) malloc((n+1)*sizeof(LONG_T)); pSums = (LONG_T *) malloc(nthreads*sizeof(LONG_T)); } #ifdef _OPENMP #pragma omp barrier #endif prefix_sums(degree, numEdges, pSums, n); #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() - elapsed_time_part; fprintf(stderr, "Prefix sums time: %lf seconds\n", elapsed_time_part); elapsed_time_part = get_seconds(); } #endif #ifdef _OPENMP #pragma omp barrier #endif if (tid == 0) { free(degree); free(pSums); w = (WEIGHT_T *) malloc(m*sizeof(WEIGHT_T)); endV = (VERT_T *) malloc(m* sizeof(VERT_T)); } mcsim_skip_instrs_end(); #ifdef _OPENMP #pragma omp barrier #pragma omp for #endif for (i=0; i<m; i++) { mcsim_skip_instrs_begin(); #if defined(UNDOLOG) || defined(REDOLOG) LONG_T undolog_u, redolog_u, undolog_j, redolog_j; VERT_T *undolog_endV, *redolog_endV; WEIGHT_T* undolog_w, *redolog_w; undolog_w = (WEIGHT_T *) malloc(m*sizeof(WEIGHT_T)); redolog_w = (WEIGHT_T *) malloc(m*sizeof(WEIGHT_T)); undolog_endV = (VERT_T *) malloc(m* sizeof(VERT_T)); redolog_endV = (VERT_T *) malloc(m* sizeof(VERT_T)); #endif // UNDOLOG || REDOLOG mcsim_skip_instrs_end(); mcsim_tx_begin(); #ifdef BASELINE mcsim_log_begin(); //mcsim_skip_instrs_begin(); #ifdef UNDOLOG undolog_u = u; undolog_j = j; undolog_endV[j] = endV[j-1]; undolog_w[j] = w[j-1]; #endif // UNDOLOG #ifdef REDOLOG redolog_u = SDGdata->startVertex[i]; redolog_j = numEdges[u] + pos[i]; redolog_endV[j] = SDGdata->endVertex[i]; 
redolog_w[j] = SDGdata->weight[i]; #endif // REDOLOG //mcsim_skip_instrs_end(); mcsim_mem_fence(); mcsim_log_end(); mcsim_mem_fence(); #endif // BASELINE u = SDGdata->startVertex[i]; j = numEdges[u] + pos[i]; endV[j] = SDGdata->endVertex[i]; w[j] = SDGdata->weight[i]; mcsim_tx_end(); #ifdef CLWB mcsim_clwb( &( u ) ); mcsim_clwb( &( j ) ); mcsim_clwb( &( endV[j] ) ); mcsim_clwb( &( w[j] ) ); #endif // CLWB // make sure undolog and redolog data structures are not discarded by compiler #if defined(UNDOLOG) || defined(REDOLOG) mcsim_skip_instrs_begin(); printf("%d\n", (int)((sizeof undolog_u) + (sizeof undolog_j) + (sizeof undolog_endV) + (sizeof undolog_w) + (sizeof redolog_u) + (sizeof redolog_j) + (sizeof redolog_endV) + (sizeof redolog_w))); mcsim_skip_instrs_end(); #endif // UNDOLOG || REDOLOG } #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() - elapsed_time_part; fprintf(stderr, "Edge data structure construction time: %lf seconds\n", elapsed_time_part); elapsed_time_part = get_seconds(); } #endif if (tid == 0) { free(pos); mcsim_skip_instrs_begin(); #ifdef UNDOLOG graph *undolog_G; undolog_G = (graph *) malloc(sizeof(graph)); #endif // UNDOLOG #ifdef REDOLOG graph *redolog_G; redolog_G = (graph *) malloc(sizeof(graph)); #endif // REDOLOG mcsim_skip_instrs_end(); mcsim_tx_begin(); #ifdef BASELINE mcsim_log_begin(); //mcsim_skip_instrs_begin(); #ifdef UNDOLOG undolog_G->n = G->n; undolog_G->m = G->m; undolog_G->numEdges = G->numEdges; undolog_G->endV = G->endV; undolog_G->weight = G->weight; #endif // UNDOLOG #ifdef REDOLOG redolog_G->n = n; redolog_G->m = m; redolog_G->numEdges = numEdges; redolog_G->endV = endV; redolog_G->weight = w; #endif // REDOLOG //mcsim_skip_instrs_end(); mcsim_mem_fence(); mcsim_log_end(); mcsim_mem_fence(); #endif // BASELINE G->n = n; G->m = m; G->numEdges = numEdges; G->endV = endV; G->weight = w; mcsim_tx_end(); #ifdef CLWB mcsim_clwb( &( G->n ) ); mcsim_clwb( &( G->m ) ); mcsim_clwb( &( G->numEdges ) ); mcsim_clwb( &( G->endV ) ); mcsim_clwb( &( G->weight ) ); #endif // CLWB // make sure undolog and redolog data structures are not discarded by compiler mcsim_skip_instrs_begin(); #ifdef UNDOLOG printf("%d\n", (int)(sizeof undolog_G)); #endif // UNDOLOG #ifdef REDOLOG printf("%d\n", (int)(sizeof redolog_G)); #endif // REDOLOG mcsim_skip_instrs_end(); } #ifdef _OPENMP } #endif mcsim_skip_instrs_begin(); /* Verification */ #if 0 fprintf(stderr, "SDG data:\n"); for (int i=0; i<SDGdata->m; i++) { fprintf(stderr, "[%ld %ld %ld] ", SDGdata->startVertex[i], SDGdata->endVertex[i], SDGdata->weight[i]); } fprintf(stderr, "\n"); for (int i=0; i<G->n + 1; i++) { fprintf(stderr, "[%ld] ", G->numEdges[i]); } fprintf(stderr, "\nGraph:\n"); for (int i=0; i<G->n; i++) { for (int j=G->numEdges[i]; j<G->numEdges[i+1]; j++) { fprintf(stderr, "[%ld %ld %ld] ", i, G->endV[j], G->weight[j]); } } #endif free(SDGdata->startVertex); free(SDGdata->endVertex); free(SDGdata->weight); elapsed_time = get_seconds() - elapsed_time; mcsim_skip_instrs_end(); return elapsed_time; }
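Stripped of the OpenMP locks, the timing code and the mcsim_*/undolog/redolog persistence instrumentation, the routine above is a three-pass CSR construction: count per-source degrees (recording each edge's slot in pos), prefix-sum the degrees into numEdges, then scatter every edge to numEdges[u] + pos[i]. A minimal single-threaded sketch with simplified types (not the benchmark's graph/graphSDG structs; allocation error handling omitted):

#include <stdlib.h>

/* Simplified CSR graph: numEdges has n+1 entries, the edges of vertex u live in
   endV[numEdges[u] .. numEdges[u+1]-1] with parallel weights. */
typedef struct { long n, m; long *numEdges; long *endV; long *weight; } csr_t;

void build_csr(csr_t *G, long n, long m,
               const long *startVertex, const long *endVertex, const long *weight)
{
    long *degree = (long *) calloc(n, sizeof(long));
    long *pos    = (long *) malloc(m * sizeof(long));
    G->numEdges  = (long *) malloc((n + 1) * sizeof(long));
    G->endV      = (long *) malloc(m * sizeof(long));
    G->weight    = (long *) malloc(m * sizeof(long));

    /* pass 1: per-source degree; pos[i] is edge i's slot within its source's block */
    for (long i = 0; i < m; i++)
        pos[i] = degree[startVertex[i]]++;

    /* pass 2: exclusive prefix sums of the degrees */
    G->numEdges[0] = 0;
    for (long u = 0; u < n; u++)
        G->numEdges[u + 1] = G->numEdges[u] + degree[u];

    /* pass 3: scatter each edge to its final position */
    for (long i = 0; i < m; i++)
    {
        long j = G->numEdges[startVertex[i]] + pos[i];
        G->endV[j] = endVertex[i];
        G->weight[j] = weight[i];
    }

    G->n = n; G->m = m;
    free(degree); free(pos);
}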
voronoinoise.h
#pragma once #ifndef VORONOI_NOISE_H #define VORONOI_NOISE_H #include "noisecommon.h" #define DEFAULT_VORONOI_FREQUENCY 1.0 #define DEFAULT_VORONOI_DISPLACEMENT 1.0 #define DEFAULT_VORONOI_SEED 0 #define DEFAULT_VORONOI_ENABLE_DISTANCE true #define DEFAULT_VORONOI_POSITION_X 0.0 #define DEFAULT_VORONOI_POSITION_Y 0.0 #define DEFAULT_VORONOI_POSITION_Z 0.0 #define DEFAULT_VORONOI_STEP 0.01 #define DEFAULT_VORONOI_PARALLEL false struct VoronoiNoise { float frequency; float displacement; int seed; unsigned char enable_distance; float position[3]; float step; bool parallel; float *(*voronoi_func)(struct VoronoiNoise *, size_t, size_t, size_t); }; static inline float *voronoi_noise_eval_1d(struct VoronoiNoise *voronoi_noise, size_t x_size); static inline float *voronoi_noise_eval_2d(struct VoronoiNoise *voronoi_noise, size_t x_size, size_t y_size); static inline float *voronoi_noise_eval_3d(struct VoronoiNoise *voronoi_noise, size_t x_size, size_t y_size, size_t z_size); static inline float voronoi_noise_eval_3d_single(struct VoronoiNoise *voronoi_noise, float x_pos, float y_pos, float z_pos); static inline float *voronoi_noise_eval_3d_fallback(struct VoronoiNoise *voronoi_noise, size_t x_size, size_t y_size, size_t z_size); static inline float *voronoi_noise_eval_3d_sse2(struct VoronoiNoise *voronoi_noise, size_t x_size, size_t y_size, size_t z_size); static inline float *voronoi_noise_eval_3d_sse4_1(struct VoronoiNoise *voronoi_noise, size_t x_size, size_t y_size, size_t z_size); static inline float *voronoi_noise_eval_3d_avx(struct VoronoiNoise *voronoi_noise, size_t x_size, size_t y_size, size_t z_size); static inline float *voronoi_noise_eval_3d_avx2(struct VoronoiNoise *voronoi_noise, size_t x_size, size_t y_size, size_t z_size); static inline float *voronoi_noise_eval_3d_avx512(struct VoronoiNoise *voronoi_noise, size_t x_size, size_t y_size, size_t z_size); static inline void voronoi_noise_init(struct VoronoiNoise *voronoi_noise) { voronoi_noise->frequency = DEFAULT_VORONOI_FREQUENCY; voronoi_noise->displacement = DEFAULT_VORONOI_DISPLACEMENT; voronoi_noise->seed = DEFAULT_VORONOI_SEED; voronoi_noise->enable_distance = DEFAULT_VORONOI_ENABLE_DISTANCE; voronoi_noise->position[0] = DEFAULT_VORONOI_POSITION_X; voronoi_noise->position[1] = DEFAULT_VORONOI_POSITION_Y; voronoi_noise->position[2] = DEFAULT_VORONOI_POSITION_Z; voronoi_noise->step = DEFAULT_VORONOI_STEP; voronoi_noise->parallel = DEFAULT_VORONOI_PARALLEL; switch (detect_simd_support()) { #ifdef ARCH_32_64 case SIMD_AVX512F: voronoi_noise->voronoi_func = &voronoi_noise_eval_3d_fallback; break; case SIMD_AVX2: voronoi_noise->voronoi_func = &voronoi_noise_eval_3d_avx2; break; case SIMD_AVX: voronoi_noise->voronoi_func = &voronoi_noise_eval_3d_avx; break; case SIMD_SSE4_1: voronoi_noise->voronoi_func = &voronoi_noise_eval_3d_sse4_1; break; case SIMD_SSE2: voronoi_noise->voronoi_func = &voronoi_noise_eval_3d_sse2; break; #else case SIMD_NEON: voronoi_noise->voronoi_func = &voronoi_noise_eval_3d_fallback; break; #endif default: voronoi_noise->voronoi_func = &voronoi_noise_eval_3d_fallback; break; } } static inline float *voronoi_noise_eval_1d(struct VoronoiNoise *voronoi_noise, size_t x_size) { return voronoi_noise->voronoi_func(voronoi_noise, x_size, 1, 1); } static inline float *voronoi_noise_eval_2d(struct VoronoiNoise *voronoi_noise, size_t x_size, size_t y_size) { return voronoi_noise->voronoi_func(voronoi_noise, x_size, y_size, 1); } static inline float *voronoi_noise_eval_3d(struct VoronoiNoise *voronoi_noise, size_t x_size, 
size_t y_size, size_t z_size) { return voronoi_noise->voronoi_func(voronoi_noise, x_size, y_size, z_size); } static inline float voronoi_noise_eval_3d_single(struct VoronoiNoise *voronoi_noise, float x_pos, float y_pos, float z_pos) { float x = (voronoi_noise->position[0] + (x_pos * voronoi_noise->step)) * voronoi_noise->frequency; float y = (voronoi_noise->position[1] + (y_pos * voronoi_noise->step)) * voronoi_noise->frequency; float z = (voronoi_noise->position[2] + (z_pos * voronoi_noise->step)) * voronoi_noise->frequency; int x_int = (x > 0.0 ? (int)x : (int)x - 1); int y_int = (y > 0.0 ? (int)y : (int)y - 1); int z_int = (z > 0.0 ? (int)z : (int)z - 1); float min_dist = 2147483647.0; float x_candidate = 0; float y_candidate = 0; float z_candidate = 0; for (int z_cur = z_int - 2; z_cur <= z_int + 2; z_cur++) { for (int y_cur = y_int - 2; y_cur <= y_int + 2; y_cur++) { for (int x_cur = x_int - 2; x_cur <= x_int + 2; x_cur++) { float x_pos = x_cur + value_noise_3d(x_cur, y_cur, z_cur, voronoi_noise->seed); float y_pos = y_cur + value_noise_3d(x_cur, y_cur, z_cur, voronoi_noise->seed + 1); float z_pos = z_cur + value_noise_3d(x_cur, y_cur, z_cur, voronoi_noise->seed + 2); float x_dist = x_pos - x; float y_dist = y_pos - y; float z_dist = z_pos - z; float dist = x_dist * x_dist + y_dist * y_dist + z_dist * z_dist; if (dist < min_dist) { min_dist = dist; x_candidate = x_pos; y_candidate = y_pos; z_candidate = z_pos; } } } } float value; if (voronoi_noise->enable_distance) { float x_dist = x_candidate - x; float y_dist = y_candidate - y; float z_dist = z_candidate - z; value = sqrt(x_dist * x_dist + y_dist * y_dist + z_dist * z_dist) * SQRT_3 - 1.0; } else value = 0.0; return value + (voronoi_noise->displacement * value_noise_3d((int)floor(x_candidate), (int)floor(y_candidate), (int)floor(z_candidate), voronoi_noise->seed)); } static inline float *voronoi_noise_eval_3d_fallback(struct VoronoiNoise *voronoi_noise, size_t x_size, size_t y_size, size_t z_size) { #ifdef CUSTOM_ALLOCATOR float *noise_set = malloc(sizeof(float) * x_size * y_size * z_size); #else float *noise_set = noise_allocate(sizeof(float), sizeof(float) * x_size * y_size * z_size); #endif #pragma omp parallel for collapse(3) if (voronoi_noise->parallel) for (int z_dim = 0; z_dim < z_size; z_dim++) { for (int y_dim = 0; y_dim < y_size; y_dim++) { for (int x_dim = 0; x_dim < x_size; x_dim++) { float x = (voronoi_noise->position[0] + (x_dim * voronoi_noise->step)) * voronoi_noise->frequency; float y = (voronoi_noise->position[1] + (y_dim * voronoi_noise->step)) * voronoi_noise->frequency; float z = (voronoi_noise->position[2] + (z_dim * voronoi_noise->step)) * voronoi_noise->frequency; int x_int = (x > 0.0 ? (int)x : (int)x - 1); int y_int = (y > 0.0 ? (int)y : (int)y - 1); int z_int = (z > 0.0 ? 
(int)z : (int)z - 1); float min_dist = 2147483647.0; float x_candidate = 0; float y_candidate = 0; float z_candidate = 0; for (int z_cur = z_int - 2; z_cur <= z_int + 2; z_cur++) { for (int y_cur = y_int - 2; y_cur <= y_int + 2; y_cur++) { for (int x_cur = x_int - 2; x_cur <= x_int + 2; x_cur++) { float x_pos = x_cur + value_noise_3d(x_cur, y_cur, z_cur, voronoi_noise->seed); float y_pos = y_cur + value_noise_3d(x_cur, y_cur, z_cur, voronoi_noise->seed + 1); float z_pos = z_cur + value_noise_3d(x_cur, y_cur, z_cur, voronoi_noise->seed + 2); float x_dist = x_pos - x; float y_dist = y_pos - y; float z_dist = z_pos - z; float dist = x_dist * x_dist + y_dist * y_dist + z_dist * z_dist; if (dist < min_dist) { min_dist = dist; x_candidate = x_pos; y_candidate = y_pos; z_candidate = z_pos; } } } } float value; if (voronoi_noise->enable_distance) { float x_dist = x_candidate - x; float y_dist = y_candidate - y; float z_dist = z_candidate - z; value = sqrt(x_dist * x_dist + y_dist * y_dist + z_dist * z_dist) * SQRT_3 - 1.0; } else value = 0.0; *(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size)))) = value + (voronoi_noise->displacement * value_noise_3d((int)floor(x_candidate), (int)floor(y_candidate), (int)floor(z_candidate), voronoi_noise->seed)); } } } return noise_set; } #ifdef ARCH_32_64 static inline float *voronoi_noise_eval_3d_sse2(struct VoronoiNoise *voronoi_noise, size_t x_size, size_t y_size, size_t z_size) { float *noise_set = noise_allocate(sizeof(__m128), sizeof(float) * x_size * y_size * z_size); #pragma omp parallel for collapse(3) if (voronoi_noise->parallel) for (int z_dim = 0; z_dim < z_size; z_dim++) { for (int y_dim = 0; y_dim < y_size; y_dim++) { for (int x_dim = 0; x_dim < x_size; x_dim += 4) { __m128 x_vec = _mm_mul_ps(_mm_add_ps(_mm_set1_ps(voronoi_noise->position[0]), _mm_mul_ps(_mm_set_ps(x_dim + 3.0, x_dim + 2.0, x_dim + 1.0, x_dim), _mm_set1_ps(voronoi_noise->step))), _mm_set1_ps(voronoi_noise->frequency)); float y = (voronoi_noise->position[1] * voronoi_noise->frequency) + (y_dim * voronoi_noise->step); float z = (voronoi_noise->position[2] * voronoi_noise->frequency) + (z_dim * voronoi_noise->step); __m128i x_int; for (int x_num = 0; x_num < 4; x_num++) *(((int32_t *)&x_int) + x_num) = (*(((float *)&x_vec) + x_num) > 0.0 ? (int)*(((float *)&x_vec) + x_num) : (int)*(((float *)&x_vec) + x_num) - 1); int y_int = (y > 0.0 ? (int)y : (int)y - 1); int z_int = (z > 0.0 ? 
(int)z : (int)z - 1);
        __m128 min_dist = _mm_set1_ps(2147483647.0);
        __m128 x_candidate = _mm_setzero_ps();
        __m128 y_candidate = _mm_setzero_ps();
        __m128 z_candidate = _mm_setzero_ps();
        for (int z_cur = z_int - 2; z_cur <= z_int + 2; z_cur++) {
          for (int y_cur = y_int - 2; y_cur <= y_int + 2; y_cur++) {
            for (int x_cur = -2; x_cur <= 2; x_cur++) {
              __m128i x_cur_temp = _mm_add_epi32(x_int, _mm_set1_epi32(x_cur));
              __m128 x_pos = _mm_add_ps(_mm_cvtepi32_ps(x_cur_temp), value_noise_3d_sse2(x_cur_temp, y_cur, z_cur, voronoi_noise->seed));
              __m128 y_pos = _mm_add_ps(_mm_set1_ps((float)y_cur), value_noise_3d_sse2(x_cur_temp, y_cur, z_cur, voronoi_noise->seed + 1));
              __m128 z_pos = _mm_add_ps(_mm_set1_ps((float)z_cur), value_noise_3d_sse2(x_cur_temp, y_cur, z_cur, voronoi_noise->seed + 2));
              __m128 x_dist = _mm_sub_ps(x_pos, x_vec);
              __m128 y_dist = _mm_sub_ps(y_pos, _mm_set1_ps(y));
              __m128 z_dist = _mm_sub_ps(z_pos, _mm_set1_ps(z));
              __m128 dist = _mm_add_ps(_mm_mul_ps(x_dist, x_dist), _mm_add_ps(_mm_mul_ps(y_dist, y_dist), _mm_mul_ps(z_dist, z_dist)));
              for (int check_num = 0; check_num < 4; check_num++) {
                float dist_extract = *(((float *)&dist) + check_num);
                float min_dist_extract = *(((float *)&min_dist) + check_num);
                if (dist_extract < min_dist_extract) {
                  *(((float *)&min_dist) + check_num) = dist_extract;
                  *(((float *)&x_candidate) + check_num) = *(((float *)&x_pos) + check_num);
                  *(((float *)&y_candidate) + check_num) = *(((float *)&y_pos) + check_num);
                  *(((float *)&z_candidate) + check_num) = *(((float *)&z_pos) + check_num);
                }
              }
            }
          }
        }
        __m128 value;
        if (voronoi_noise->enable_distance) {
          __m128 x_dist = _mm_sub_ps(x_candidate, x_vec);
          __m128 y_dist = _mm_sub_ps(y_candidate, _mm_set1_ps(y));
          __m128 z_dist = _mm_sub_ps(z_candidate, _mm_set1_ps(z));
          value = _mm_sub_ps(_mm_mul_ps(_mm_sqrt_ps(_mm_add_ps(_mm_mul_ps(x_dist, x_dist), _mm_add_ps(_mm_mul_ps(y_dist, y_dist), _mm_mul_ps(z_dist, z_dist)))), _mm_set1_ps(SQRT_3)), _mm_set1_ps(1.0));
        } else
          value = _mm_setzero_ps();
        for (int floor_candidate_num = 0; floor_candidate_num < 4; floor_candidate_num++) {
          *(((float *)&x_candidate) + floor_candidate_num) = floor(*(((float *)&x_candidate) + floor_candidate_num));
          *(((float *)&y_candidate) + floor_candidate_num) = floor(*(((float *)&y_candidate) + floor_candidate_num));
          *(((float *)&z_candidate) + floor_candidate_num) = floor(*(((float *)&z_candidate) + floor_candidate_num));
        }
        _mm_store_ps(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size))), _mm_add_ps(value, _mm_mul_ps(_mm_set1_ps(voronoi_noise->displacement), value_noise_3d_sse2_full(_mm_cvtps_epi32(x_candidate), _mm_cvtps_epi32(y_candidate), _mm_cvtps_epi32(z_candidate), voronoi_noise->seed))));
      }
    }
  }
  return noise_set;
}

static inline float *voronoi_noise_eval_3d_sse4_1(struct VoronoiNoise *voronoi_noise, size_t x_size, size_t y_size, size_t z_size) {
  float *noise_set = noise_allocate(sizeof(__m128), sizeof(float) * x_size * y_size * z_size);
#pragma omp parallel for collapse(3) if (voronoi_noise->parallel)
  for (int z_dim = 0; z_dim < z_size; z_dim++) {
    for (int y_dim = 0; y_dim < y_size; y_dim++) {
      for (int x_dim = 0; x_dim < x_size; x_dim += 4) {
        __m128 x_vec = _mm_mul_ps(_mm_add_ps(_mm_set1_ps(voronoi_noise->position[0]), _mm_mul_ps(_mm_set_ps(x_dim + 3.0, x_dim + 2.0, x_dim + 1.0, x_dim), _mm_set1_ps(voronoi_noise->step))), _mm_set1_ps(voronoi_noise->frequency));
        float y = (voronoi_noise->position[1] * voronoi_noise->frequency) + (y_dim * voronoi_noise->step);
        float z = (voronoi_noise->position[2] * voronoi_noise->frequency) + (z_dim * voronoi_noise->step);
        __m128i x_int = _mm_cvtps_epi32(_mm_floor_ps(_mm_blendv_ps(_mm_sub_ps(x_vec, _mm_set1_ps(1.0)), x_vec, _mm_cmp_ps(x_vec, _mm_setzero_ps(), _CMP_GT_OQ))));
        int y_int = (y > 0.0 ? (int)y : (int)y - 1);
        int z_int = (z > 0.0 ? (int)z : (int)z - 1);
        __m128 min_dist = _mm_set1_ps(2147483647.0);
        __m128 x_candidate = _mm_setzero_ps();
        __m128 y_candidate = _mm_setzero_ps();
        __m128 z_candidate = _mm_setzero_ps();
        for (int z_cur = z_int - 2; z_cur <= z_int + 2; z_cur++) {
          for (int y_cur = y_int - 2; y_cur <= y_int + 2; y_cur++) {
            for (int x_cur = -2; x_cur <= 2; x_cur++) {
              __m128i x_cur_temp = _mm_add_epi32(x_int, _mm_set1_epi32(x_cur));
              __m128 x_pos = _mm_add_ps(_mm_cvtepi32_ps(x_cur_temp), value_noise_3d_sse4_1(x_cur_temp, y_cur, z_cur, voronoi_noise->seed));
              __m128 y_pos = _mm_add_ps(_mm_set1_ps((float)y_cur), value_noise_3d_sse4_1(x_cur_temp, y_cur, z_cur, voronoi_noise->seed + 1));
              __m128 z_pos = _mm_add_ps(_mm_set1_ps((float)z_cur), value_noise_3d_sse4_1(x_cur_temp, y_cur, z_cur, voronoi_noise->seed + 2));
              __m128 x_dist = _mm_sub_ps(x_pos, x_vec);
              __m128 y_dist = _mm_sub_ps(y_pos, _mm_set1_ps(y));
              __m128 z_dist = _mm_sub_ps(z_pos, _mm_set1_ps(z));
              __m128 dist = _mm_add_ps(_mm_mul_ps(x_dist, x_dist), _mm_add_ps(_mm_mul_ps(y_dist, y_dist), _mm_mul_ps(z_dist, z_dist)));
              __m128 dist_cmp_mask = _mm_cmp_ps(dist, min_dist, _CMP_LT_OQ);
              min_dist = _mm_blendv_ps(min_dist, dist, dist_cmp_mask);
              x_candidate = _mm_blendv_ps(x_candidate, x_pos, dist_cmp_mask);
              y_candidate = _mm_blendv_ps(y_candidate, y_pos, dist_cmp_mask);
              z_candidate = _mm_blendv_ps(z_candidate, z_pos, dist_cmp_mask);
            }
          }
        }
        __m128 value;
        if (voronoi_noise->enable_distance) {
          __m128 x_dist = _mm_sub_ps(x_candidate, x_vec);
          __m128 y_dist = _mm_sub_ps(y_candidate, _mm_set1_ps(y));
          __m128 z_dist = _mm_sub_ps(z_candidate, _mm_set1_ps(z));
          value = _mm_sub_ps(_mm_mul_ps(_mm_sqrt_ps(_mm_add_ps(_mm_mul_ps(x_dist, x_dist), _mm_add_ps(_mm_mul_ps(y_dist, y_dist), _mm_mul_ps(z_dist, z_dist)))), _mm_set1_ps(SQRT_3)), _mm_set1_ps(1.0));
        } else
          value = _mm_setzero_ps();
        _mm_store_ps(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size))), _mm_add_ps(value, _mm_mul_ps(_mm_set1_ps(voronoi_noise->displacement), value_noise_3d_sse4_1_full(_mm_cvtps_epi32(_mm_floor_ps(x_candidate)), _mm_cvtps_epi32(_mm_floor_ps(y_candidate)), _mm_cvtps_epi32(_mm_floor_ps(z_candidate)), voronoi_noise->seed))));
      }
    }
  }
  return noise_set;
}

static inline float *voronoi_noise_eval_3d_avx(struct VoronoiNoise *voronoi_noise, size_t x_size, size_t y_size, size_t z_size) {
  float *noise_set = noise_allocate(sizeof(__m256), sizeof(float) * x_size * y_size * z_size);
#pragma omp parallel for collapse(3) if (voronoi_noise->parallel)
  for (int z_dim = 0; z_dim < z_size; z_dim++) {
    for (int y_dim = 0; y_dim < y_size; y_dim++) {
      for (int x_dim = 0; x_dim < x_size; x_dim += 8) {
        __m256 x_vec = _mm256_mul_ps(_mm256_add_ps(_mm256_set1_ps(voronoi_noise->position[0]), _mm256_mul_ps(_mm256_set_ps(x_dim + 7.0, x_dim + 6.0, x_dim + 5.0, x_dim + 4.0, x_dim + 3.0, x_dim + 2.0, x_dim + 1.0, x_dim), _mm256_set1_ps(voronoi_noise->step))), _mm256_set1_ps(voronoi_noise->frequency));
        float y = (voronoi_noise->position[1] + (y_dim * voronoi_noise->step)) * voronoi_noise->frequency;
        float z = (voronoi_noise->position[2] + (z_dim * voronoi_noise->step)) * voronoi_noise->frequency;
        __m256i x_int = _mm256_cvtps_epi32(_mm256_floor_ps(_mm256_blendv_ps(_mm256_sub_ps(x_vec, _mm256_set1_ps(1.0)), x_vec, _mm256_cmp_ps(x_vec, _mm256_setzero_ps(), _CMP_GT_OQ))));
        int y_int = (y > 0.0 ? (int)y : (int)y - 1);
        int z_int = (z > 0.0 ? (int)z : (int)z - 1);
        __m256 min_dist = _mm256_set1_ps(2147483647.0);
        __m256 x_candidate = _mm256_setzero_ps();
        __m256 y_candidate = _mm256_setzero_ps();
        __m256 z_candidate = _mm256_setzero_ps();
        for (int z_cur = z_int - 2; z_cur <= z_int + 2; z_cur++) {
          for (int y_cur = y_int - 2; y_cur <= y_int + 2; y_cur++) {
            for (int x_cur = -2; x_cur <= 2; x_cur++) {
              __m128i x_cur_temp_low = _mm_add_epi32(_mm256_extractf128_si256(x_int, 0), _mm_set1_epi32(x_cur));
              __m128i x_cur_temp_high = _mm_add_epi32(_mm256_extractf128_si256(x_int, 1), _mm_set1_epi32(x_cur));
              __m256i x_cur_temp = _mm256_set_m128i(x_cur_temp_high, x_cur_temp_low);
              __m256 x_pos = _mm256_add_ps(_mm256_cvtepi32_ps(x_cur_temp), value_noise_3d_avx(x_cur_temp, y_cur, z_cur, voronoi_noise->seed));
              __m256 y_pos = _mm256_add_ps(_mm256_set1_ps((float)y_cur), value_noise_3d_avx(x_cur_temp, y_cur, z_cur, voronoi_noise->seed + 1));
              __m256 z_pos = _mm256_add_ps(_mm256_set1_ps((float)z_cur), value_noise_3d_avx(x_cur_temp, y_cur, z_cur, voronoi_noise->seed + 2));
              __m256 x_dist = _mm256_sub_ps(x_pos, x_vec);
              __m256 y_dist = _mm256_sub_ps(y_pos, _mm256_set1_ps(y));
              __m256 z_dist = _mm256_sub_ps(z_pos, _mm256_set1_ps(z));
              __m256 dist = _mm256_add_ps(_mm256_mul_ps(x_dist, x_dist), _mm256_add_ps(_mm256_mul_ps(y_dist, y_dist), _mm256_mul_ps(z_dist, z_dist)));
              __m256 dist_cmp_mask = _mm256_cmp_ps(dist, min_dist, _CMP_LT_OQ);
              min_dist = _mm256_blendv_ps(min_dist, dist, dist_cmp_mask);
              x_candidate = _mm256_blendv_ps(x_candidate, x_pos, dist_cmp_mask);
              y_candidate = _mm256_blendv_ps(y_candidate, y_pos, dist_cmp_mask);
              z_candidate = _mm256_blendv_ps(z_candidate, z_pos, dist_cmp_mask);
            }
          }
        }
        __m256 value;
        if (voronoi_noise->enable_distance) {
          __m256 x_dist = _mm256_sub_ps(x_candidate, x_vec);
          __m256 y_dist = _mm256_sub_ps(y_candidate, _mm256_set1_ps(y));
          __m256 z_dist = _mm256_sub_ps(z_candidate, _mm256_set1_ps(z));
          value = _mm256_sub_ps(_mm256_mul_ps(_mm256_sqrt_ps(_mm256_add_ps(_mm256_mul_ps(x_dist, x_dist), _mm256_add_ps(_mm256_mul_ps(y_dist, y_dist), _mm256_mul_ps(z_dist, z_dist)))), _mm256_set1_ps(SQRT_3)), _mm256_set1_ps(1.0));
        } else
          value = _mm256_setzero_ps();
        _mm256_store_ps(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size))), _mm256_add_ps(value, _mm256_mul_ps(_mm256_set1_ps(voronoi_noise->displacement), value_noise_3d_avx_full(_mm256_cvtps_epi32(_mm256_floor_ps(x_candidate)), _mm256_cvtps_epi32(_mm256_floor_ps(y_candidate)), _mm256_cvtps_epi32(_mm256_floor_ps(z_candidate)), voronoi_noise->seed))));
      }
    }
  }
  return noise_set;
}

static inline float *voronoi_noise_eval_3d_avx2(struct VoronoiNoise *voronoi_noise, size_t x_size, size_t y_size, size_t z_size) {
  float *noise_set = noise_allocate(sizeof(__m256), sizeof(float) * x_size * y_size * z_size);
#pragma omp parallel for collapse(3) if (voronoi_noise->parallel)
  for (int z_dim = 0; z_dim < z_size; z_dim++) {
    for (int y_dim = 0; y_dim < y_size; y_dim++) {
      for (int x_dim = 0; x_dim < x_size; x_dim += 8) {
        __m256 x_vec = _mm256_mul_ps(_mm256_add_ps(_mm256_set1_ps(voronoi_noise->position[0]), _mm256_mul_ps(_mm256_set_ps(x_dim + 7.0, x_dim + 6.0, x_dim + 5.0, x_dim + 4.0, x_dim + 3.0, x_dim + 2.0, x_dim + 1.0, x_dim), _mm256_set1_ps(voronoi_noise->step))), _mm256_set1_ps(voronoi_noise->frequency));
        float y = (voronoi_noise->position[1] + (y_dim * voronoi_noise->step)) * voronoi_noise->frequency;
        float z = (voronoi_noise->position[2] + (z_dim * voronoi_noise->step)) * voronoi_noise->frequency;
        __m256i x_int = _mm256_cvtps_epi32(_mm256_floor_ps(_mm256_blendv_ps(_mm256_sub_ps(x_vec, _mm256_set1_ps(1.0)), x_vec, _mm256_cmp_ps(x_vec, _mm256_setzero_ps(), _CMP_GT_OQ))));
        int y_int = (y > 0.0 ? (int)y : (int)y - 1);
        int z_int = (z > 0.0 ? (int)z : (int)z - 1);
        __m256 min_dist = _mm256_set1_ps(2147483647.0);
        __m256 x_candidate = _mm256_setzero_ps();
        __m256 y_candidate = _mm256_setzero_ps();
        __m256 z_candidate = _mm256_setzero_ps();
        for (int z_cur = z_int - 2; z_cur <= z_int + 2; z_cur++) {
          for (int y_cur = y_int - 2; y_cur <= y_int + 2; y_cur++) {
            for (int x_cur = -2; x_cur <= 2; x_cur++) {
              __m256i x_cur_temp = _mm256_add_epi32(x_int, _mm256_set1_epi32(x_cur));
              __m256 x_pos = _mm256_add_ps(_mm256_cvtepi32_ps(x_cur_temp), value_noise_3d_avx2(x_cur_temp, y_cur, z_cur, voronoi_noise->seed));
              __m256 y_pos = _mm256_add_ps(_mm256_set1_ps((float)y_cur), value_noise_3d_avx2(x_cur_temp, y_cur, z_cur, voronoi_noise->seed + 1));
              __m256 z_pos = _mm256_add_ps(_mm256_set1_ps((float)z_cur), value_noise_3d_avx2(x_cur_temp, y_cur, z_cur, voronoi_noise->seed + 2));
              __m256 x_dist = _mm256_sub_ps(x_pos, x_vec);
              __m256 y_dist = _mm256_sub_ps(y_pos, _mm256_set1_ps(y));
              __m256 z_dist = _mm256_sub_ps(z_pos, _mm256_set1_ps(z));
              __m256 dist = _mm256_add_ps(_mm256_mul_ps(x_dist, x_dist), _mm256_add_ps(_mm256_mul_ps(y_dist, y_dist), _mm256_mul_ps(z_dist, z_dist)));
              __m256 dist_cmp_mask = _mm256_cmp_ps(dist, min_dist, _CMP_LT_OQ);
              min_dist = _mm256_blendv_ps(min_dist, dist, dist_cmp_mask);
              x_candidate = _mm256_blendv_ps(x_candidate, x_pos, dist_cmp_mask);
              y_candidate = _mm256_blendv_ps(y_candidate, y_pos, dist_cmp_mask);
              z_candidate = _mm256_blendv_ps(z_candidate, z_pos, dist_cmp_mask);
            }
          }
        }
        __m256 value;
        if (voronoi_noise->enable_distance) {
          __m256 x_dist = _mm256_sub_ps(x_candidate, x_vec);
          __m256 y_dist = _mm256_sub_ps(y_candidate, _mm256_set1_ps(y));
          __m256 z_dist = _mm256_sub_ps(z_candidate, _mm256_set1_ps(z));
          value = _mm256_sub_ps(_mm256_mul_ps(_mm256_sqrt_ps(_mm256_add_ps(_mm256_mul_ps(x_dist, x_dist), _mm256_add_ps(_mm256_mul_ps(y_dist, y_dist), _mm256_mul_ps(z_dist, z_dist)))), _mm256_set1_ps(SQRT_3)), _mm256_set1_ps(1.0));
        } else
          value = _mm256_setzero_ps();
        _mm256_store_ps(noise_set + (x_dim + (y_dim * x_size) + (z_dim * (x_size * y_size))), _mm256_add_ps(value, _mm256_mul_ps(_mm256_set1_ps(voronoi_noise->displacement), value_noise_3d_avx2_full(_mm256_cvtps_epi32(_mm256_floor_ps(x_candidate)), _mm256_cvtps_epi32(_mm256_floor_ps(y_candidate)), _mm256_cvtps_epi32(_mm256_floor_ps(z_candidate)), voronoi_noise->seed))));
      }
    }
  }
  return noise_set;
}

#endif
#endif // VORONOI_NOISE_H
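For reference, the cell search that all four SIMD kernels above implement can be written as a short scalar routine. The sketch below is illustrative only and is not part of the header: hash_noise_3d is a hypothetical stand-in for the library's value_noise_3d_* helpers (assumed to return a pseudo-random float in roughly [-1, 1] per lattice cell and seed), and the literal 1.7320508f plays the role of SQRT_3. It operates on coordinates that have already been scaled by position, step, and frequency, i.e. the per-sample work done inside the innermost loop.

#include <math.h>
#include <stdint.h>

/* Hypothetical hash standing in for value_noise_3d_*: maps an integer
   lattice point plus seed to a float in roughly (-1, 1]. */
static float hash_noise_3d(int x, int y, int z, int seed)
{
    uint32_t n = (uint32_t)x * 1619u + (uint32_t)y * 31337u +
                 (uint32_t)z * 6971u + (uint32_t)seed * 1013u;
    n = (n >> 13) ^ n;
    n = n * (n * n * 60493u + 19990303u) + 1376312589u;
    return 1.0f - (float)(n & 0x7fffffffu) / 1073741824.0f;
}

/* Scalar sketch of one Voronoi sample: scan the 5x5x5 block of lattice
   cells around (x, y, z), jitter each cell's seed point with the hash,
   keep the nearest one, and combine optional distance with displacement. */
static float voronoi_sample_3d(float x, float y, float z, int seed,
                               int enable_distance, float displacement)
{
    int x_int = (x > 0.0f ? (int)x : (int)x - 1);
    int y_int = (y > 0.0f ? (int)y : (int)y - 1);
    int z_int = (z > 0.0f ? (int)z : (int)z - 1);

    float min_dist = 2147483647.0f;
    float xc = 0.0f, yc = 0.0f, zc = 0.0f;

    for (int zi = z_int - 2; zi <= z_int + 2; zi++)
        for (int yi = y_int - 2; yi <= y_int + 2; yi++)
            for (int xi = x_int - 2; xi <= x_int + 2; xi++) {
                float xp = xi + hash_noise_3d(xi, yi, zi, seed);
                float yp = yi + hash_noise_3d(xi, yi, zi, seed + 1);
                float zp = zi + hash_noise_3d(xi, yi, zi, seed + 2);
                float d = (xp - x) * (xp - x) + (yp - y) * (yp - y) + (zp - z) * (zp - z);
                if (d < min_dist) {   /* keep the nearest jittered seed point */
                    min_dist = d;
                    xc = xp; yc = yp; zc = zp;
                }
            }

    float value = 0.0f;
    if (enable_distance)              /* sqrt(d) * sqrt(3) - 1, as in the kernels */
        value = sqrtf(min_dist) * 1.7320508f - 1.0f;

    /* displacement is keyed to the winning cell's integer coordinates */
    return value + displacement *
           hash_noise_3d((int)floorf(xc), (int)floorf(yc), (int)floorf(zc), seed);
}

The SIMD variants differ from this sketch only in evaluating 4 or 8 x positions per iteration and in how the per-lane minimum is maintained: scalar extracts under SSE2, blendv masks under a comparison from SSE4.1/AVX/AVX2 onward.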
GB_unop__frexpx_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__frexpx_fp32_fp32
// op(A') function:  GB_unop_tran__frexpx_fp32_fp32

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = GB_frexpxf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_frexpxf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    float aij = Ax [pA] ;           \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ;                 \
    Cx [pC] = GB_frexpxf (z) ;      \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FREXPX || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__frexpx_fp32_fp32
(
    float *Cx,                      // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = GB_frexpxf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = GB_frexpxf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__frexpx_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
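Since the file is auto-generated, the only operator-specific piece is GB_frexpxf. As a hedged illustration (GB_frexpxf is assumed here to behave like the fraction returned by C's frexpf with the exponent discarded; its actual definition lives elsewhere in GraphBLAS), the per-entry work of the apply kernel is equivalent to:

#include <math.h>
#include <stdio.h>

/* Assumed behaviour of GB_frexpxf: the normalized fraction of x, i.e. the
   first result of frexpf, with magnitude in [0.5, 1) for nonzero x. */
static float frexpx_f32(float x)
{
    int unused_exp;
    return frexpf(x, &unused_exp);
}

int main(void)
{
    const float Ax[4] = {6.0f, -0.75f, 1024.0f, 0.0f};
    float Cx[4];
    for (int p = 0; p < 4; p++)
        Cx[p] = frexpx_f32(Ax[p]);   /* mirrors Cx [p] = GB_frexpxf (z) above */
    for (int p = 0; p < 4; p++)
        printf("%g -> %g\n", (double)Ax[p], (double)Cx[p]);
    return 0;   /* prints 6 -> 0.75, -0.75 -> -0.75, 1024 -> 0.5, 0 -> 0 */
}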
rose_v1_true_l2.c
/*
 * Outer loop: no dependence:
 * Inner loop: loop-carried dependence
 *
 * final dependence graph:
 * dep SgExprStatement:(a[i])[j] =(((a[i])[j - 1]) + 1);
 *     SgExprStatement:(a[i])[j] =(((a[i])[j - 1]) + 1);
 *     2*2TRUE_DEP; commonlevel = 2 +precise CarryLevel = 1
 * SgPntrArrRefExp:(a[i])[j]
 * SgPntrArrRefExp:((a[i])[j - 1]) == 0;* 0;||* 0;== -1;||::
 */
#include <omp.h>

int i;
int j;
int a[100][100];

void foo()
{
#pragma omp parallel for private (i,j)
  for (i = 1; i <= 99; i += 1) {
    for (j = 1; j <= 99; j += 1) {
      a[i][j] = a[i][j - 1] + 1;
    }
  }
}
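The comment block above is ROSE's dependence summary: the j loop carries a true dependence because a[i][j] reads a[i][j-1] written in the previous iteration, while rows (distinct i) never touch each other, so only the i loop receives the OpenMP pragma; parallelizing or collapsing the j loop would race on that carried dependence. A small hypothetical driver (main and the closed-form check are mine, not part of the ROSE test case) makes the recurrence visible: starting from a zeroed row, the j loop reduces to a[i][j] = a[i][0] + j.

#include <stdio.h>

extern int a[100][100];   /* defined in the test file above */
void foo(void);

int main(void)
{
    for (int r = 0; r < 100; r++)
        for (int c = 0; c < 100; c++)
            a[r][c] = 0;
    foo();

    int ok = 1;
    for (int r = 1; r <= 99; r++)
        for (int c = 1; c <= 99; c++)
            ok &= (a[r][c] == a[r][0] + c);   /* a[r][0] stays 0, so a[r][c] == c */

    printf("closed form holds: %s\n", ok ? "yes" : "no");
    return 0;
}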