Dataset columns (string length ranges):
hip_filename: string, lengths 5 to 84
hip_content: string, lengths 79 to 9.69M
cuda_filename: string, lengths 4 to 83
cuda_content: string, lengths 19 to 9.69M
10cc48fa3629eefe114c8a81a0a93ce466c5ec69.hip
// !!! This is a file automatically generated by hipify!!! #include "cudaSplitEncsegs.h" #include "cudaFlipFlop.h" #include "cudaMesh.h" void splitEncsegs( Real2D &t_pointlist, PStatusD &t_PStatus, IntD &t_trianglelist, IntD &t_neighborlist, IntD &t_tri2subseg, TStatusD &t_TStatus, IntD &t_segmentlist, IntD &t_subseg2tri, IntD &t_subseg2seg, IntD &t_encmarker, IntD &t_enclist, IntD &t_internalmarker, IntD &t_internallist, IntD &t_flipBy, IntD &t_flipActive, IntD &t_linklist, IntD &t_linkslot, IntD &t_emptypoints, IntD &t_emptytriangles, int pointblock, int triblock, int * numberofemptypoints, int * numberofemptytriangles, int * numberofpoints, int * numberoftriangles, int * numberofsubseg, int encmode, REAL theta, int debug_iter ) { int numberofencs; // number of encroached subsegs int numberofdels; // number of subsegs that need to delete their apex int numberofmids; // number of subsegs that need to be inserted midpoint int numberofblocks; int iteration = 0; // loop until there is no encroached subseg left while (true) { // update encroached subsegs active list numberofencs = updateActiveListByMarker_Slot(t_encmarker, t_enclist, *numberofsubseg); #ifdef GQM2D_DEBUG_2 printf("Iteration = %d, number of encroached segments = %d\n", iteration, numberofencs); if (false) { int * debug_el = new int[numberofencs]; hipMemcpy(debug_el, thrust::raw_pointer_cast(&t_enclist[0]), sizeof(int)*numberofencs, hipMemcpyDeviceToHost); for (int i = 0; i < numberofencs; i++) printf("%d ", debug_el[i]); printf("\n"); delete[] debug_el; } #endif if (numberofencs == 0) break; // use internal marker and list for deletion // init deletion marker t_internalmarker.resize(numberofencs); thrust::copy(t_enclist.begin(), t_enclist.end(), t_internalmarker.begin()); // delete all points inside diametral circle int step = 0; while (true) { // update deletion subsegs active list and marker numberofdels = updateActiveListByMarker_Val(t_internalmarker, t_internallist, t_internalmarker.size()); #ifdef GQM2D_DEBUG_2 printf(" numberofdels = %d\n", numberofdels); #endif if (numberofdels == 0) break; t_internalmarker.resize(numberofdels); // mark reduntant points markReduntantPoints( t_pointlist, t_PStatus, t_trianglelist, t_neighborlist, t_TStatus, t_subseg2tri, t_internalmarker, t_internallist, numberofdels); #ifdef GQM2D_DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif // maintain denauly property, do flip-flop flipFlop( t_pointlist, t_PStatus, t_trianglelist, t_neighborlist, t_tri2subseg, t_TStatus, t_subseg2tri, t_flipBy, t_flipActive, t_encmarker, t_linklist, t_linkslot, *numberoftriangles, encmode, theta, -1, -1); // check if encroachment markers are updated correctly #ifdef GQM2D_DEBUG_3 { printf(" Iteration %d, Step %d: After Remove redundant points\n", iteration, step); int * debug_em = new int[*numberofsubseg]; int * debug_tl = new int[3 * (*numberoftriangles)]; int * debug_nl = new int[3 * (*numberoftriangles)]; REAL2 * debug_pl = new REAL2[*numberofpoints]; int * debug_st = new int[*numberofsubseg]; hipMemcpy(debug_em, thrust::raw_pointer_cast(&t_encmarker[0]), sizeof(int)*(*numberofsubseg), hipMemcpyDeviceToHost); hipMemcpy(debug_tl, thrust::raw_pointer_cast(&t_trianglelist[0]), sizeof(int) * 3 * *numberoftriangles, hipMemcpyDeviceToHost); hipMemcpy(debug_nl, thrust::raw_pointer_cast(&t_neighborlist[0]), sizeof(int) * 3 * *numberoftriangles, hipMemcpyDeviceToHost); hipMemcpy(debug_pl, thrust::raw_pointer_cast(&t_pointlist[0]), sizeof(REAL2)**numberofpoints, hipMemcpyDeviceToHost); 
hipMemcpy(debug_st, thrust::raw_pointer_cast(&t_subseg2tri[0]), sizeof(int)**numberofsubseg, hipMemcpyDeviceToHost); for (int i = 0; i < *numberofsubseg; i++) { int otri = debug_st[i]; int tri = otri >> 2; int ori = otri & 3; int p[3]; REAL2 v[3]; p[0] = debug_tl[3 * tri + (ori + 1) % 3]; p[1] = debug_tl[3 * tri + (ori + 2) % 3]; p[2] = debug_tl[3 * tri + ori]; v[0] = debug_pl[p[0]]; v[1] = debug_pl[p[1]]; v[2] = debug_pl[p[2]]; bool tag = false; // indicate if this segment is encroached or not REAL goodcoss = cos(theta * PI / 180.0); goodcoss *= goodcoss; REAL dotproduct = (v[0].x - v[2].x)*(v[1].x - v[2].x) + (v[0].y - v[2].y)*(v[1].y - v[2].y); if (dotproduct < 0.0) // angle > 90 { // here, we use diametral lens to speedup the algorithm if (encmode || dotproduct * dotproduct >= (2.0*goodcoss - 1.0)*(2.0*goodcoss - 1.0) * ((v[0].x - v[2].x)*(v[0].x - v[2].x) + (v[0].y - v[2].y)*(v[0].y - v[2].y)) * ((v[1].x - v[2].x)*(v[1].x - v[2].x) + (v[1].y - v[2].y)*(v[1].y - v[2].y))) tag = true; } otri = debug_nl[3 * tri + ori]; if (otri != -1) { tri = otri >> 2; ori = otri & 3; p[2] = debug_tl[3 * tri + ori]; v[2] = debug_pl[p[2]]; dotproduct = (v[0].x - v[2].x)*(v[1].x - v[2].x) + (v[0].y - v[2].y)*(v[1].y - v[2].y); if (dotproduct < 0.0) // angle > 90 { // here, we use diametral lens to speedup the algorithm if (encmode || dotproduct * dotproduct >= (2.0*goodcoss - 1.0)*(2.0*goodcoss - 1.0) * ((v[0].x - v[2].x)*(v[0].x - v[2].x) + (v[0].y - v[2].y)*(v[0].y - v[2].y)) * ((v[1].x - v[2].x)*(v[1].x - v[2].x) + (v[1].y - v[2].y)*(v[1].y - v[2].y))) tag = true; } } if (debug_em[i] == -1 && tag) printf(" iteration = %d, step = %d, Segment %d: I am encroached but marked as non-encroached\n", iteration, step, i); if (debug_em[i] == 0 && !tag) printf(" iteration = %d, step = %d, Segment %d: I am not encroached but marked as encroached\n", iteration, step, i); //if( debug_em[i] == 1) // printf("Line 3362, iteration = %d, step = %d, Segment %d: I am marked as encroached because I am on segment\n",iteration,step,i); } delete[] debug_em; delete[] debug_tl; delete[] debug_nl; delete[] debug_pl; delete[] debug_st; printf(" Finished Checking\n"); } #endif step++; } #ifdef GQM2D_DEBUG_3 { gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); printf(" Iteration %d: After Remove all redundant points\n", iteration); int * debug_tl = new int[3 * (*numberoftriangles)]; REAL2 * debug_pl = new REAL2[*numberofpoints]; TStatus * debug_ts = new TStatus[*numberoftriangles]; hipMemcpy(debug_tl, thrust::raw_pointer_cast(&t_trianglelist[0]), sizeof(int) * 3 * *numberoftriangles, hipMemcpyDeviceToHost); hipMemcpy(debug_pl, thrust::raw_pointer_cast(&t_pointlist[0]), sizeof(REAL2)**numberofpoints, hipMemcpyDeviceToHost); hipMemcpy(debug_ts, thrust::raw_pointer_cast(&t_TStatus[0]), sizeof(TStatus)**numberoftriangles, hipMemcpyDeviceToHost); for (int i = 0; i < *numberoftriangles; i++) { if (!debug_ts[i].isNull()) { bool errorflag = false; int p[3]; REAL2 v[3]; for (int j = 0; j < 3; j++) { p[j] = debug_tl[3 * i + j]; v[j] = debug_pl[p[j]]; } for (int j = 0; j < 2; j++) { for (int k = j + 1; k < 3; k++) { if (v[j].x == v[k].x && v[j].y == v[k].y) { errorflag = true; } } } if (errorflag) printf(" After remove redundant points - Tri %d: Duplicate vertice\n", i); } } delete[] debug_tl; delete[] debug_pl; delete[] debug_ts; printf(" Finished Checking\n"); } #endif // check if there is enough space // numberofencs points are going to be inserted if (numberofencs > *numberofemptypoints) { *numberofemptypoints = 
updateEmptyPoints(t_PStatus, t_emptypoints); int num = 0; while (numberofencs > *numberofemptypoints + num*pointblock) num++; if (num != 0) { int old_size = t_PStatus.size(); PStatus emptyPoint; emptyPoint.setDeleted(); t_pointlist.resize(old_size + num*pointblock); t_PStatus.resize(old_size + num*pointblock, emptyPoint); *numberofemptypoints = updateEmptyPoints(t_PStatus, t_emptypoints); } } if (2 * numberofencs > *numberofemptytriangles) { *numberofemptytriangles = updateEmptyTriangles(t_TStatus, t_emptytriangles); int num = 0; while (2 * numberofencs > *numberofemptytriangles + num*triblock) num++; if (num != 0) { int old_size = t_TStatus.size(); TStatus emptyTri(true, false, false); t_trianglelist.resize(3 * (old_size + num*triblock)); t_neighborlist.resize(3 * (old_size + num*triblock)); t_tri2subseg.resize(3 * (old_size + num*triblock), -1); t_TStatus.resize(old_size + num*triblock, emptyTri); t_flipBy.resize(old_size + num*triblock); *numberofemptytriangles = updateEmptyTriangles(t_TStatus, t_emptytriangles); } } t_subseg2tri.resize(*numberofsubseg + numberofencs); t_subseg2seg.resize(*numberofsubseg + numberofencs); // use internal marker and list for insertion subsegs, use t_flipBy as insertion marker // init insertion subseg marker t_encmarker.resize(*numberofsubseg + numberofencs, -1); t_internalmarker.resize(numberofencs); thrust::copy(t_enclist.begin(), t_enclist.end(), t_internalmarker.begin()); // split all encroached subsegs while (true) { // inside one triangle, more than one segment may split, violation may happen // update insertion subsegs active list and marker // t_internallist store the indices for t_enclist // in order to keep thread id for kernels, do not resize t_internalmarker numberofmids = updateActiveListByMarker_Slot(t_internalmarker, t_internallist, numberofencs); #ifdef GQM2D_DEBUG_2 printf(" numberofmids = %d\n", numberofmids); #endif if (numberofmids == 0) break; // reset insertion (triangles) marker: t_flipBy and t_flipOri numberofblocks = (ceil)((float)numberofmids / BLOCK_SIZE); kernelResetMidInsertionMarker << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_TStatus[0]), thrust::raw_pointer_cast(&t_subseg2tri[0]), thrust::raw_pointer_cast(&t_flipBy[0]), thrust::raw_pointer_cast(&t_internalmarker[0]), thrust::raw_pointer_cast(&t_internallist[0]), numberofmids); #ifdef GQM2D_DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif // mark insertion triangles numberofblocks = (ceil)((float)numberofmids / BLOCK_SIZE); kernelMarkMidInsertion << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_TStatus[0]), thrust::raw_pointer_cast(&t_subseg2tri[0]), thrust::raw_pointer_cast(&t_flipBy[0]), thrust::raw_pointer_cast(&t_internalmarker[0]), thrust::raw_pointer_cast(&t_internallist[0]), numberofmids, thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), debug_iter); #ifdef GQM2D_DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif // insert points for winners numberofblocks = (ceil)((float)numberofmids / BLOCK_SIZE); kernelInsertMidPoints << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_pointlist[0]), thrust::raw_pointer_cast(&t_PStatus[0]), thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), thrust::raw_pointer_cast(&t_TStatus[0]), 
thrust::raw_pointer_cast(&t_segmentlist[0]), thrust::raw_pointer_cast(&t_subseg2tri[0]), thrust::raw_pointer_cast(&t_subseg2seg[0]), thrust::raw_pointer_cast(&t_encmarker[0]), thrust::raw_pointer_cast(&t_internalmarker[0]), thrust::raw_pointer_cast(&t_internallist[0]), thrust::raw_pointer_cast(&t_flipBy[0]), thrust::raw_pointer_cast(&t_emptypoints[0]), thrust::raw_pointer_cast(&t_emptytriangles[0]), t_emptypoints.size(), t_emptytriangles.size(), *numberofemptypoints, *numberofemptytriangles, *numberofsubseg, numberofmids, encmode, theta, iteration); #ifdef GQM2D_DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif // update neighbors information numberofblocks = (ceil)((float)numberofmids / BLOCK_SIZE); kernelUpdateMidNeighbors << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_pointlist[0]), thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), thrust::raw_pointer_cast(&t_TStatus[0]), thrust::raw_pointer_cast(&t_subseg2tri[0]), thrust::raw_pointer_cast(&t_encmarker[0]), thrust::raw_pointer_cast(&t_enclist[0]), thrust::raw_pointer_cast(&t_internalmarker[0]), thrust::raw_pointer_cast(&t_internallist[0]), numberofmids, encmode, theta); #ifdef GQM2D_DEBUG gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); #endif } // Update iteration variables // (1) check if there are any slots before last points/triangles // (2) update last points/triangles/subsegs // (3) update number of empty points/triangles int slot_before, slot_after; // point slots slot_after = t_PStatus.size() - *numberofpoints; slot_before = *numberofemptypoints - slot_after; if (slot_before < numberofencs) *numberofpoints += numberofencs - slot_before; *numberofemptypoints -= numberofencs; // triangle slots slot_after = t_TStatus.size() - *numberoftriangles; slot_before = *numberofemptytriangles - slot_after; if (slot_before < 2 * numberofencs) *numberoftriangles += 2 * numberofencs - slot_before; *numberofemptytriangles -= 2 * numberofencs; // subseg *numberofsubseg += numberofencs; #ifdef GQM2D_DEBUG_3 // check if encroachment markers are updated correctly { printf(" Iteration %d: After Insert mid points\n", iteration); int * debug_em = new int[*numberofsubseg]; int * debug_tl = new int[3 * (*numberoftriangles)]; int * debug_nl = new int[3 * (*numberoftriangles)]; REAL2 * debug_pl = new REAL2[*numberofpoints]; int * debug_st = new int[*numberofsubseg]; hipMemcpy(debug_em, thrust::raw_pointer_cast(&t_encmarker[0]), sizeof(int)*(*numberofsubseg), hipMemcpyDeviceToHost); hipMemcpy(debug_tl, thrust::raw_pointer_cast(&t_trianglelist[0]), sizeof(int) * 3 * *numberoftriangles, hipMemcpyDeviceToHost); hipMemcpy(debug_nl, thrust::raw_pointer_cast(&t_neighborlist[0]), sizeof(int) * 3 * *numberoftriangles, hipMemcpyDeviceToHost); hipMemcpy(debug_pl, thrust::raw_pointer_cast(&t_pointlist[0]), sizeof(REAL2)**numberofpoints, hipMemcpyDeviceToHost); hipMemcpy(debug_st, thrust::raw_pointer_cast(&t_subseg2tri[0]), sizeof(int)**numberofsubseg, hipMemcpyDeviceToHost); for (int i = 0; i < *numberofsubseg; i++) { int otri = debug_st[i]; int tri = otri >> 2; int ori = otri & 3; int p[3]; REAL2 v[3]; p[0] = debug_tl[3 * tri + (ori + 1) % 3]; p[1] = debug_tl[3 * tri + (ori + 2) % 3]; p[2] = debug_tl[3 * tri + ori]; v[0] = debug_pl[p[0]]; v[1] = debug_pl[p[1]]; v[2] = debug_pl[p[2]]; bool tag = false; // indicate if this segment is encroached or not REAL goodcoss = cos(theta * PI / 180.0); goodcoss *= goodcoss; REAL 
dotproduct = (v[0].x - v[2].x)*(v[1].x - v[2].x) + (v[0].y - v[2].y)*(v[1].y - v[2].y); if (dotproduct < 0.0) // angle > 90 { // here, we use diametral lens to speedup the algorithm if (encmode || dotproduct * dotproduct >= (2.0*goodcoss - 1.0)*(2.0*goodcoss - 1.0) * ((v[0].x - v[2].x)*(v[0].x - v[2].x) + (v[0].y - v[2].y)*(v[0].y - v[2].y)) * ((v[1].x - v[2].x)*(v[1].x - v[2].x) + (v[1].y - v[2].y)*(v[1].y - v[2].y))) tag = true; } otri = debug_nl[3 * tri + ori]; if (otri != -1) { tri = otri >> 2; ori = otri & 3; p[2] = debug_tl[3 * tri + ori]; v[2] = debug_pl[p[2]]; dotproduct = (v[0].x - v[2].x)*(v[1].x - v[2].x) + (v[0].y - v[2].y)*(v[1].y - v[2].y); if (dotproduct < 0.0) // angle > 90 { // here, we use diametral lens to speedup the algorithm if (encmode || dotproduct * dotproduct >= (2.0*goodcoss - 1.0)*(2.0*goodcoss - 1.0) * ((v[0].x - v[2].x)*(v[0].x - v[2].x) + (v[0].y - v[2].y)*(v[0].y - v[2].y)) * ((v[1].x - v[2].x)*(v[1].x - v[2].x) + (v[1].y - v[2].y)*(v[1].y - v[2].y))) tag = true; } } if (debug_em[i] == -1 && tag) printf(" iteration = %d, Segment %d: I am encroached but marked as non-encroached\n", i, iteration); if (debug_em[i] == 0 && !tag) printf(" iteration = %d, Segment %d: I am not encroached but marked as encroached\n", i, iteration); } delete[] debug_em; delete[] debug_tl; delete[] debug_nl; delete[] debug_pl; delete[] debug_st; } // Check if contain duplicate vertices { int * debug_tl = new int[3 * (*numberoftriangles)]; REAL2 * debug_pl = new REAL2[*numberofpoints]; TStatus * debug_ts = new TStatus[*numberoftriangles]; hipMemcpy(debug_tl, thrust::raw_pointer_cast(&t_trianglelist[0]), sizeof(int) * 3 * *numberoftriangles, hipMemcpyDeviceToHost); hipMemcpy(debug_pl, thrust::raw_pointer_cast(&t_pointlist[0]), sizeof(REAL2)**numberofpoints, hipMemcpyDeviceToHost); hipMemcpy(debug_ts, thrust::raw_pointer_cast(&t_TStatus[0]), sizeof(TStatus)**numberoftriangles, hipMemcpyDeviceToHost); for (int i = 0; i < *numberoftriangles; i++) { if (!debug_ts[i].isNull()) { bool errorflag = false; int p[3]; REAL2 v[3]; for (int j = 0; j<3; j++) { p[j] = debug_tl[3 * i + j]; v[j] = debug_pl[p[j]]; } for (int j = 0; j<2; j++) { for (int k = j + 1; k<3; k++) { if (v[j].x == v[k].x && v[j].y == v[k].y) { errorflag = true; } } } if (errorflag) printf(" After insert midpoints - Tri %d (%d, %d, %d): Duplicate vertice\n", i, p[0], p[1], p[2]); } } delete[] debug_tl; delete[] debug_pl; delete[] debug_ts; } #endif // maintain denauly property, do flip-flop flipFlop( t_pointlist, t_PStatus, t_trianglelist, t_neighborlist, t_tri2subseg, t_TStatus, t_subseg2tri, t_flipBy, t_flipActive, t_encmarker, t_linklist, t_linkslot, *numberoftriangles, encmode, theta, -1, -1); // check if encroachment markers are updated correctly #ifdef GQM2D_DEBUG_3 { printf(" Iteration %d: After Insert midpoints and flipFlop\n", iteration); int * debug_em = new int[*numberofsubseg]; int * debug_tl = new int[3 * (*numberoftriangles)]; int * debug_nl = new int[3 * (*numberoftriangles)]; REAL2 * debug_pl = new REAL2[*numberofpoints]; int * debug_st = new int[*numberofsubseg]; hipMemcpy(debug_em, thrust::raw_pointer_cast(&t_encmarker[0]), sizeof(int)*(*numberofsubseg), hipMemcpyDeviceToHost); hipMemcpy(debug_tl, thrust::raw_pointer_cast(&t_trianglelist[0]), sizeof(int) * 3 * *numberoftriangles, hipMemcpyDeviceToHost); hipMemcpy(debug_nl, thrust::raw_pointer_cast(&t_neighborlist[0]), sizeof(int) * 3 * *numberoftriangles, hipMemcpyDeviceToHost); hipMemcpy(debug_pl, thrust::raw_pointer_cast(&t_pointlist[0]), 
sizeof(REAL2)**numberofpoints, hipMemcpyDeviceToHost); hipMemcpy(debug_st, thrust::raw_pointer_cast(&t_subseg2tri[0]), sizeof(int)**numberofsubseg, hipMemcpyDeviceToHost); for (int i = 0; i< *numberofsubseg; i++) { int otri = debug_st[i]; int tri = otri >> 2; int ori = otri & 3; int p[3]; REAL2 v[3]; p[0] = debug_tl[3 * tri + (ori + 1) % 3]; p[1] = debug_tl[3 * tri + (ori + 2) % 3]; p[2] = debug_tl[3 * tri + ori]; v[0] = debug_pl[p[0]]; v[1] = debug_pl[p[1]]; v[2] = debug_pl[p[2]]; bool tag = false; // indicate if this segment is encroached or not REAL goodcoss = cos(theta * PI / 180.0); goodcoss *= goodcoss; REAL dotproduct = (v[0].x - v[2].x)*(v[1].x - v[2].x) + (v[0].y - v[2].y)*(v[1].y - v[2].y); if (dotproduct < 0.0) // angle > 90 { // here, we use diametral lens to speedup the algorithm if (encmode || dotproduct * dotproduct >= (2.0*goodcoss - 1.0)*(2.0*goodcoss - 1.0) * ((v[0].x - v[2].x)*(v[0].x - v[2].x) + (v[0].y - v[2].y)*(v[0].y - v[2].y)) * ((v[1].x - v[2].x)*(v[1].x - v[2].x) + (v[1].y - v[2].y)*(v[1].y - v[2].y))) tag = true; } otri = debug_nl[3 * tri + ori]; if (otri != -1) { tri = otri >> 2; ori = otri & 3; p[2] = debug_tl[3 * tri + ori]; v[2] = debug_pl[p[2]]; dotproduct = (v[0].x - v[2].x)*(v[1].x - v[2].x) + (v[0].y - v[2].y)*(v[1].y - v[2].y); if (dotproduct < 0.0) // angle > 90 { // here, we use diametral lens to speedup the algorithm if (encmode || dotproduct * dotproduct >= (2.0*goodcoss - 1.0)*(2.0*goodcoss - 1.0) * ((v[0].x - v[2].x)*(v[0].x - v[2].x) + (v[0].y - v[2].y)*(v[0].y - v[2].y)) * ((v[1].x - v[2].x)*(v[1].x - v[2].x) + (v[1].y - v[2].y)*(v[1].y - v[2].y))) tag = true; } } if (debug_em[i] == -1 && tag) printf(" iteration = %d, Segment %d: I am encroached but marked as non-encroached\n", i, iteration); if (debug_em[i] == 0 && !tag) printf(" iteration = %d, Segment %d: I am not encroached but marked as encroached\n", i, iteration); } printf("Finished Checking\n"); } #endif iteration++; } //printf("splitEncsegs - totally %d iterations\n",iteration); }
10cc48fa3629eefe114c8a81a0a93ce466c5ec69.cu
#include "cudaSplitEncsegs.h" #include "cudaFlipFlop.h" #include "cudaMesh.h" void splitEncsegs( Real2D &t_pointlist, PStatusD &t_PStatus, IntD &t_trianglelist, IntD &t_neighborlist, IntD &t_tri2subseg, TStatusD &t_TStatus, IntD &t_segmentlist, IntD &t_subseg2tri, IntD &t_subseg2seg, IntD &t_encmarker, IntD &t_enclist, IntD &t_internalmarker, IntD &t_internallist, IntD &t_flipBy, IntD &t_flipActive, IntD &t_linklist, IntD &t_linkslot, IntD &t_emptypoints, IntD &t_emptytriangles, int pointblock, int triblock, int * numberofemptypoints, int * numberofemptytriangles, int * numberofpoints, int * numberoftriangles, int * numberofsubseg, int encmode, REAL theta, int debug_iter ) { int numberofencs; // number of encroached subsegs int numberofdels; // number of subsegs that need to delete their apex int numberofmids; // number of subsegs that need to be inserted midpoint int numberofblocks; int iteration = 0; // loop until there is no encroached subseg left while (true) { // update encroached subsegs active list numberofencs = updateActiveListByMarker_Slot(t_encmarker, t_enclist, *numberofsubseg); #ifdef GQM2D_DEBUG_2 printf("Iteration = %d, number of encroached segments = %d\n", iteration, numberofencs); if (false) { int * debug_el = new int[numberofencs]; cudaMemcpy(debug_el, thrust::raw_pointer_cast(&t_enclist[0]), sizeof(int)*numberofencs, cudaMemcpyDeviceToHost); for (int i = 0; i < numberofencs; i++) printf("%d ", debug_el[i]); printf("\n"); delete[] debug_el; } #endif if (numberofencs == 0) break; // use internal marker and list for deletion // init deletion marker t_internalmarker.resize(numberofencs); thrust::copy(t_enclist.begin(), t_enclist.end(), t_internalmarker.begin()); // delete all points inside diametral circle int step = 0; while (true) { // update deletion subsegs active list and marker numberofdels = updateActiveListByMarker_Val(t_internalmarker, t_internallist, t_internalmarker.size()); #ifdef GQM2D_DEBUG_2 printf(" numberofdels = %d\n", numberofdels); #endif if (numberofdels == 0) break; t_internalmarker.resize(numberofdels); // mark reduntant points markReduntantPoints( t_pointlist, t_PStatus, t_trianglelist, t_neighborlist, t_TStatus, t_subseg2tri, t_internalmarker, t_internallist, numberofdels); #ifdef GQM2D_DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif // maintain denauly property, do flip-flop flipFlop( t_pointlist, t_PStatus, t_trianglelist, t_neighborlist, t_tri2subseg, t_TStatus, t_subseg2tri, t_flipBy, t_flipActive, t_encmarker, t_linklist, t_linkslot, *numberoftriangles, encmode, theta, -1, -1); // check if encroachment markers are updated correctly #ifdef GQM2D_DEBUG_3 { printf(" Iteration %d, Step %d: After Remove redundant points\n", iteration, step); int * debug_em = new int[*numberofsubseg]; int * debug_tl = new int[3 * (*numberoftriangles)]; int * debug_nl = new int[3 * (*numberoftriangles)]; REAL2 * debug_pl = new REAL2[*numberofpoints]; int * debug_st = new int[*numberofsubseg]; cudaMemcpy(debug_em, thrust::raw_pointer_cast(&t_encmarker[0]), sizeof(int)*(*numberofsubseg), cudaMemcpyDeviceToHost); cudaMemcpy(debug_tl, thrust::raw_pointer_cast(&t_trianglelist[0]), sizeof(int) * 3 * *numberoftriangles, cudaMemcpyDeviceToHost); cudaMemcpy(debug_nl, thrust::raw_pointer_cast(&t_neighborlist[0]), sizeof(int) * 3 * *numberoftriangles, cudaMemcpyDeviceToHost); cudaMemcpy(debug_pl, thrust::raw_pointer_cast(&t_pointlist[0]), sizeof(REAL2)**numberofpoints, cudaMemcpyDeviceToHost); cudaMemcpy(debug_st, 
thrust::raw_pointer_cast(&t_subseg2tri[0]), sizeof(int)**numberofsubseg, cudaMemcpyDeviceToHost); for (int i = 0; i < *numberofsubseg; i++) { int otri = debug_st[i]; int tri = otri >> 2; int ori = otri & 3; int p[3]; REAL2 v[3]; p[0] = debug_tl[3 * tri + (ori + 1) % 3]; p[1] = debug_tl[3 * tri + (ori + 2) % 3]; p[2] = debug_tl[3 * tri + ori]; v[0] = debug_pl[p[0]]; v[1] = debug_pl[p[1]]; v[2] = debug_pl[p[2]]; bool tag = false; // indicate if this segment is encroached or not REAL goodcoss = cos(theta * PI / 180.0); goodcoss *= goodcoss; REAL dotproduct = (v[0].x - v[2].x)*(v[1].x - v[2].x) + (v[0].y - v[2].y)*(v[1].y - v[2].y); if (dotproduct < 0.0) // angle > 90 { // here, we use diametral lens to speedup the algorithm if (encmode || dotproduct * dotproduct >= (2.0*goodcoss - 1.0)*(2.0*goodcoss - 1.0) * ((v[0].x - v[2].x)*(v[0].x - v[2].x) + (v[0].y - v[2].y)*(v[0].y - v[2].y)) * ((v[1].x - v[2].x)*(v[1].x - v[2].x) + (v[1].y - v[2].y)*(v[1].y - v[2].y))) tag = true; } otri = debug_nl[3 * tri + ori]; if (otri != -1) { tri = otri >> 2; ori = otri & 3; p[2] = debug_tl[3 * tri + ori]; v[2] = debug_pl[p[2]]; dotproduct = (v[0].x - v[2].x)*(v[1].x - v[2].x) + (v[0].y - v[2].y)*(v[1].y - v[2].y); if (dotproduct < 0.0) // angle > 90 { // here, we use diametral lens to speedup the algorithm if (encmode || dotproduct * dotproduct >= (2.0*goodcoss - 1.0)*(2.0*goodcoss - 1.0) * ((v[0].x - v[2].x)*(v[0].x - v[2].x) + (v[0].y - v[2].y)*(v[0].y - v[2].y)) * ((v[1].x - v[2].x)*(v[1].x - v[2].x) + (v[1].y - v[2].y)*(v[1].y - v[2].y))) tag = true; } } if (debug_em[i] == -1 && tag) printf(" iteration = %d, step = %d, Segment %d: I am encroached but marked as non-encroached\n", iteration, step, i); if (debug_em[i] == 0 && !tag) printf(" iteration = %d, step = %d, Segment %d: I am not encroached but marked as encroached\n", iteration, step, i); //if( debug_em[i] == 1) // printf("Line 3362, iteration = %d, step = %d, Segment %d: I am marked as encroached because I am on segment\n",iteration,step,i); } delete[] debug_em; delete[] debug_tl; delete[] debug_nl; delete[] debug_pl; delete[] debug_st; printf(" Finished Checking\n"); } #endif step++; } #ifdef GQM2D_DEBUG_3 { gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); printf(" Iteration %d: After Remove all redundant points\n", iteration); int * debug_tl = new int[3 * (*numberoftriangles)]; REAL2 * debug_pl = new REAL2[*numberofpoints]; TStatus * debug_ts = new TStatus[*numberoftriangles]; cudaMemcpy(debug_tl, thrust::raw_pointer_cast(&t_trianglelist[0]), sizeof(int) * 3 * *numberoftriangles, cudaMemcpyDeviceToHost); cudaMemcpy(debug_pl, thrust::raw_pointer_cast(&t_pointlist[0]), sizeof(REAL2)**numberofpoints, cudaMemcpyDeviceToHost); cudaMemcpy(debug_ts, thrust::raw_pointer_cast(&t_TStatus[0]), sizeof(TStatus)**numberoftriangles, cudaMemcpyDeviceToHost); for (int i = 0; i < *numberoftriangles; i++) { if (!debug_ts[i].isNull()) { bool errorflag = false; int p[3]; REAL2 v[3]; for (int j = 0; j < 3; j++) { p[j] = debug_tl[3 * i + j]; v[j] = debug_pl[p[j]]; } for (int j = 0; j < 2; j++) { for (int k = j + 1; k < 3; k++) { if (v[j].x == v[k].x && v[j].y == v[k].y) { errorflag = true; } } } if (errorflag) printf(" After remove redundant points - Tri %d: Duplicate vertice\n", i); } } delete[] debug_tl; delete[] debug_pl; delete[] debug_ts; printf(" Finished Checking\n"); } #endif // check if there is enough space // numberofencs points are going to be inserted if (numberofencs > *numberofemptypoints) { *numberofemptypoints = 
updateEmptyPoints(t_PStatus, t_emptypoints); int num = 0; while (numberofencs > *numberofemptypoints + num*pointblock) num++; if (num != 0) { int old_size = t_PStatus.size(); PStatus emptyPoint; emptyPoint.setDeleted(); t_pointlist.resize(old_size + num*pointblock); t_PStatus.resize(old_size + num*pointblock, emptyPoint); *numberofemptypoints = updateEmptyPoints(t_PStatus, t_emptypoints); } } if (2 * numberofencs > *numberofemptytriangles) { *numberofemptytriangles = updateEmptyTriangles(t_TStatus, t_emptytriangles); int num = 0; while (2 * numberofencs > *numberofemptytriangles + num*triblock) num++; if (num != 0) { int old_size = t_TStatus.size(); TStatus emptyTri(true, false, false); t_trianglelist.resize(3 * (old_size + num*triblock)); t_neighborlist.resize(3 * (old_size + num*triblock)); t_tri2subseg.resize(3 * (old_size + num*triblock), -1); t_TStatus.resize(old_size + num*triblock, emptyTri); t_flipBy.resize(old_size + num*triblock); *numberofemptytriangles = updateEmptyTriangles(t_TStatus, t_emptytriangles); } } t_subseg2tri.resize(*numberofsubseg + numberofencs); t_subseg2seg.resize(*numberofsubseg + numberofencs); // use internal marker and list for insertion subsegs, use t_flipBy as insertion marker // init insertion subseg marker t_encmarker.resize(*numberofsubseg + numberofencs, -1); t_internalmarker.resize(numberofencs); thrust::copy(t_enclist.begin(), t_enclist.end(), t_internalmarker.begin()); // split all encroached subsegs while (true) { // inside one triangle, more than one segment may split, violation may happen // update insertion subsegs active list and marker // t_internallist store the indices for t_enclist // in order to keep thread id for kernels, do not resize t_internalmarker numberofmids = updateActiveListByMarker_Slot(t_internalmarker, t_internallist, numberofencs); #ifdef GQM2D_DEBUG_2 printf(" numberofmids = %d\n", numberofmids); #endif if (numberofmids == 0) break; // reset insertion (triangles) marker: t_flipBy and t_flipOri numberofblocks = (ceil)((float)numberofmids / BLOCK_SIZE); kernelResetMidInsertionMarker << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_TStatus[0]), thrust::raw_pointer_cast(&t_subseg2tri[0]), thrust::raw_pointer_cast(&t_flipBy[0]), thrust::raw_pointer_cast(&t_internalmarker[0]), thrust::raw_pointer_cast(&t_internallist[0]), numberofmids); #ifdef GQM2D_DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif // mark insertion triangles numberofblocks = (ceil)((float)numberofmids / BLOCK_SIZE); kernelMarkMidInsertion << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_TStatus[0]), thrust::raw_pointer_cast(&t_subseg2tri[0]), thrust::raw_pointer_cast(&t_flipBy[0]), thrust::raw_pointer_cast(&t_internalmarker[0]), thrust::raw_pointer_cast(&t_internallist[0]), numberofmids, thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), debug_iter); #ifdef GQM2D_DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif // insert points for winners numberofblocks = (ceil)((float)numberofmids / BLOCK_SIZE); kernelInsertMidPoints << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_pointlist[0]), thrust::raw_pointer_cast(&t_PStatus[0]), thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), thrust::raw_pointer_cast(&t_TStatus[0]), 
thrust::raw_pointer_cast(&t_segmentlist[0]), thrust::raw_pointer_cast(&t_subseg2tri[0]), thrust::raw_pointer_cast(&t_subseg2seg[0]), thrust::raw_pointer_cast(&t_encmarker[0]), thrust::raw_pointer_cast(&t_internalmarker[0]), thrust::raw_pointer_cast(&t_internallist[0]), thrust::raw_pointer_cast(&t_flipBy[0]), thrust::raw_pointer_cast(&t_emptypoints[0]), thrust::raw_pointer_cast(&t_emptytriangles[0]), t_emptypoints.size(), t_emptytriangles.size(), *numberofemptypoints, *numberofemptytriangles, *numberofsubseg, numberofmids, encmode, theta, iteration); #ifdef GQM2D_DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif // update neighbors information numberofblocks = (ceil)((float)numberofmids / BLOCK_SIZE); kernelUpdateMidNeighbors << <numberofblocks, BLOCK_SIZE >> >( thrust::raw_pointer_cast(&t_pointlist[0]), thrust::raw_pointer_cast(&t_trianglelist[0]), thrust::raw_pointer_cast(&t_neighborlist[0]), thrust::raw_pointer_cast(&t_tri2subseg[0]), thrust::raw_pointer_cast(&t_TStatus[0]), thrust::raw_pointer_cast(&t_subseg2tri[0]), thrust::raw_pointer_cast(&t_encmarker[0]), thrust::raw_pointer_cast(&t_enclist[0]), thrust::raw_pointer_cast(&t_internalmarker[0]), thrust::raw_pointer_cast(&t_internallist[0]), numberofmids, encmode, theta); #ifdef GQM2D_DEBUG gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); #endif } // Update iteration variables // (1) check if there are any slots before last points/triangles // (2) update last points/triangles/subsegs // (3) update number of empty points/triangles int slot_before, slot_after; // point slots slot_after = t_PStatus.size() - *numberofpoints; slot_before = *numberofemptypoints - slot_after; if (slot_before < numberofencs) *numberofpoints += numberofencs - slot_before; *numberofemptypoints -= numberofencs; // triangle slots slot_after = t_TStatus.size() - *numberoftriangles; slot_before = *numberofemptytriangles - slot_after; if (slot_before < 2 * numberofencs) *numberoftriangles += 2 * numberofencs - slot_before; *numberofemptytriangles -= 2 * numberofencs; // subseg *numberofsubseg += numberofencs; #ifdef GQM2D_DEBUG_3 // check if encroachment markers are updated correctly { printf(" Iteration %d: After Insert mid points\n", iteration); int * debug_em = new int[*numberofsubseg]; int * debug_tl = new int[3 * (*numberoftriangles)]; int * debug_nl = new int[3 * (*numberoftriangles)]; REAL2 * debug_pl = new REAL2[*numberofpoints]; int * debug_st = new int[*numberofsubseg]; cudaMemcpy(debug_em, thrust::raw_pointer_cast(&t_encmarker[0]), sizeof(int)*(*numberofsubseg), cudaMemcpyDeviceToHost); cudaMemcpy(debug_tl, thrust::raw_pointer_cast(&t_trianglelist[0]), sizeof(int) * 3 * *numberoftriangles, cudaMemcpyDeviceToHost); cudaMemcpy(debug_nl, thrust::raw_pointer_cast(&t_neighborlist[0]), sizeof(int) * 3 * *numberoftriangles, cudaMemcpyDeviceToHost); cudaMemcpy(debug_pl, thrust::raw_pointer_cast(&t_pointlist[0]), sizeof(REAL2)**numberofpoints, cudaMemcpyDeviceToHost); cudaMemcpy(debug_st, thrust::raw_pointer_cast(&t_subseg2tri[0]), sizeof(int)**numberofsubseg, cudaMemcpyDeviceToHost); for (int i = 0; i < *numberofsubseg; i++) { int otri = debug_st[i]; int tri = otri >> 2; int ori = otri & 3; int p[3]; REAL2 v[3]; p[0] = debug_tl[3 * tri + (ori + 1) % 3]; p[1] = debug_tl[3 * tri + (ori + 2) % 3]; p[2] = debug_tl[3 * tri + ori]; v[0] = debug_pl[p[0]]; v[1] = debug_pl[p[1]]; v[2] = debug_pl[p[2]]; bool tag = false; // indicate if this segment is encroached or not REAL goodcoss = cos(theta * PI / 180.0); goodcoss *= 
goodcoss; REAL dotproduct = (v[0].x - v[2].x)*(v[1].x - v[2].x) + (v[0].y - v[2].y)*(v[1].y - v[2].y); if (dotproduct < 0.0) // angle > 90 { // here, we use diametral lens to speedup the algorithm if (encmode || dotproduct * dotproduct >= (2.0*goodcoss - 1.0)*(2.0*goodcoss - 1.0) * ((v[0].x - v[2].x)*(v[0].x - v[2].x) + (v[0].y - v[2].y)*(v[0].y - v[2].y)) * ((v[1].x - v[2].x)*(v[1].x - v[2].x) + (v[1].y - v[2].y)*(v[1].y - v[2].y))) tag = true; } otri = debug_nl[3 * tri + ori]; if (otri != -1) { tri = otri >> 2; ori = otri & 3; p[2] = debug_tl[3 * tri + ori]; v[2] = debug_pl[p[2]]; dotproduct = (v[0].x - v[2].x)*(v[1].x - v[2].x) + (v[0].y - v[2].y)*(v[1].y - v[2].y); if (dotproduct < 0.0) // angle > 90 { // here, we use diametral lens to speedup the algorithm if (encmode || dotproduct * dotproduct >= (2.0*goodcoss - 1.0)*(2.0*goodcoss - 1.0) * ((v[0].x - v[2].x)*(v[0].x - v[2].x) + (v[0].y - v[2].y)*(v[0].y - v[2].y)) * ((v[1].x - v[2].x)*(v[1].x - v[2].x) + (v[1].y - v[2].y)*(v[1].y - v[2].y))) tag = true; } } if (debug_em[i] == -1 && tag) printf(" iteration = %d, Segment %d: I am encroached but marked as non-encroached\n", i, iteration); if (debug_em[i] == 0 && !tag) printf(" iteration = %d, Segment %d: I am not encroached but marked as encroached\n", i, iteration); } delete[] debug_em; delete[] debug_tl; delete[] debug_nl; delete[] debug_pl; delete[] debug_st; } // Check if contain duplicate vertices { int * debug_tl = new int[3 * (*numberoftriangles)]; REAL2 * debug_pl = new REAL2[*numberofpoints]; TStatus * debug_ts = new TStatus[*numberoftriangles]; cudaMemcpy(debug_tl, thrust::raw_pointer_cast(&t_trianglelist[0]), sizeof(int) * 3 * *numberoftriangles, cudaMemcpyDeviceToHost); cudaMemcpy(debug_pl, thrust::raw_pointer_cast(&t_pointlist[0]), sizeof(REAL2)**numberofpoints, cudaMemcpyDeviceToHost); cudaMemcpy(debug_ts, thrust::raw_pointer_cast(&t_TStatus[0]), sizeof(TStatus)**numberoftriangles, cudaMemcpyDeviceToHost); for (int i = 0; i < *numberoftriangles; i++) { if (!debug_ts[i].isNull()) { bool errorflag = false; int p[3]; REAL2 v[3]; for (int j = 0; j<3; j++) { p[j] = debug_tl[3 * i + j]; v[j] = debug_pl[p[j]]; } for (int j = 0; j<2; j++) { for (int k = j + 1; k<3; k++) { if (v[j].x == v[k].x && v[j].y == v[k].y) { errorflag = true; } } } if (errorflag) printf(" After insert midpoints - Tri %d (%d, %d, %d): Duplicate vertice\n", i, p[0], p[1], p[2]); } } delete[] debug_tl; delete[] debug_pl; delete[] debug_ts; } #endif // maintain denauly property, do flip-flop flipFlop( t_pointlist, t_PStatus, t_trianglelist, t_neighborlist, t_tri2subseg, t_TStatus, t_subseg2tri, t_flipBy, t_flipActive, t_encmarker, t_linklist, t_linkslot, *numberoftriangles, encmode, theta, -1, -1); // check if encroachment markers are updated correctly #ifdef GQM2D_DEBUG_3 { printf(" Iteration %d: After Insert midpoints and flipFlop\n", iteration); int * debug_em = new int[*numberofsubseg]; int * debug_tl = new int[3 * (*numberoftriangles)]; int * debug_nl = new int[3 * (*numberoftriangles)]; REAL2 * debug_pl = new REAL2[*numberofpoints]; int * debug_st = new int[*numberofsubseg]; cudaMemcpy(debug_em, thrust::raw_pointer_cast(&t_encmarker[0]), sizeof(int)*(*numberofsubseg), cudaMemcpyDeviceToHost); cudaMemcpy(debug_tl, thrust::raw_pointer_cast(&t_trianglelist[0]), sizeof(int) * 3 * *numberoftriangles, cudaMemcpyDeviceToHost); cudaMemcpy(debug_nl, thrust::raw_pointer_cast(&t_neighborlist[0]), sizeof(int) * 3 * *numberoftriangles, cudaMemcpyDeviceToHost); cudaMemcpy(debug_pl, 
thrust::raw_pointer_cast(&t_pointlist[0]), sizeof(REAL2)**numberofpoints, cudaMemcpyDeviceToHost); cudaMemcpy(debug_st, thrust::raw_pointer_cast(&t_subseg2tri[0]), sizeof(int)**numberofsubseg, cudaMemcpyDeviceToHost); for (int i = 0; i< *numberofsubseg; i++) { int otri = debug_st[i]; int tri = otri >> 2; int ori = otri & 3; int p[3]; REAL2 v[3]; p[0] = debug_tl[3 * tri + (ori + 1) % 3]; p[1] = debug_tl[3 * tri + (ori + 2) % 3]; p[2] = debug_tl[3 * tri + ori]; v[0] = debug_pl[p[0]]; v[1] = debug_pl[p[1]]; v[2] = debug_pl[p[2]]; bool tag = false; // indicate if this segment is encroached or not REAL goodcoss = cos(theta * PI / 180.0); goodcoss *= goodcoss; REAL dotproduct = (v[0].x - v[2].x)*(v[1].x - v[2].x) + (v[0].y - v[2].y)*(v[1].y - v[2].y); if (dotproduct < 0.0) // angle > 90 { // here, we use diametral lens to speedup the algorithm if (encmode || dotproduct * dotproduct >= (2.0*goodcoss - 1.0)*(2.0*goodcoss - 1.0) * ((v[0].x - v[2].x)*(v[0].x - v[2].x) + (v[0].y - v[2].y)*(v[0].y - v[2].y)) * ((v[1].x - v[2].x)*(v[1].x - v[2].x) + (v[1].y - v[2].y)*(v[1].y - v[2].y))) tag = true; } otri = debug_nl[3 * tri + ori]; if (otri != -1) { tri = otri >> 2; ori = otri & 3; p[2] = debug_tl[3 * tri + ori]; v[2] = debug_pl[p[2]]; dotproduct = (v[0].x - v[2].x)*(v[1].x - v[2].x) + (v[0].y - v[2].y)*(v[1].y - v[2].y); if (dotproduct < 0.0) // angle > 90 { // here, we use diametral lens to speedup the algorithm if (encmode || dotproduct * dotproduct >= (2.0*goodcoss - 1.0)*(2.0*goodcoss - 1.0) * ((v[0].x - v[2].x)*(v[0].x - v[2].x) + (v[0].y - v[2].y)*(v[0].y - v[2].y)) * ((v[1].x - v[2].x)*(v[1].x - v[2].x) + (v[1].y - v[2].y)*(v[1].y - v[2].y))) tag = true; } } if (debug_em[i] == -1 && tag) printf(" iteration = %d, Segment %d: I am encroached but marked as non-encroached\n", i, iteration); if (debug_em[i] == 0 && !tag) printf(" iteration = %d, Segment %d: I am not encroached but marked as encroached\n", i, iteration); } printf("Finished Checking\n"); } #endif iteration++; } //printf("splitEncsegs - totally %d iterations\n",iteration); }
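The .cu and .hip files in the pair above differ only in how runtime calls and kernel launches are spelled, which is exactly what hipify rewrites. The sketch below is a minimal, hypothetical CUDA program (the scale kernel, sizes, and values are illustrative, not taken from the dataset), with the corresponding HIP spellings noted in comments.

// Minimal CUDA sketch of the calls hipify rewrites in the pair above.
// The scale kernel, sizes, and values are illustrative only.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void scale(float* data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

int main() {
    const int n = 1024;
    float host[n];
    for (int i = 0; i < n; ++i) host[i] = 1.0f;

    float* dev = NULL;
    cudaMalloc(&dev, n * sizeof(float));                      // HIP: hipMalloc
    cudaMemcpy(dev, host, n * sizeof(float),
               cudaMemcpyHostToDevice);                       // HIP: hipMemcpy, hipMemcpyHostToDevice
    scale<<<(n + 255) / 256, 256>>>(dev, 2.0f, n);            // HIP: hipLaunchKernelGGL(scale, dim3((n+255)/256), dim3(256), 0, 0, dev, 2.0f, n)
    cudaMemcpy(host, dev, n * sizeof(float),
               cudaMemcpyDeviceToHost);                       // HIP: hipMemcpy, hipMemcpyDeviceToHost
    printf("host[0] = %f\n", host[0]);                        // expect 2.0
    cudaFree(dev);                                            // HIP: hipFree
    return 0;
}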
3fa942207f43965f9c9c3bfd9332683e9db35c14.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/scalar/scalar.hpp> #include <cudf/scalar/scalar_device_view.cuh> #include <cudf/types.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <tests/utilities/base_fixture.hpp> #include <tests/utilities/cudf_gtest.hpp> #include <tests/utilities/type_list_utilities.hpp> #include <tests/utilities/type_lists.hpp> #include <thrust/sequence.h> #include <random> #include <gmock/gmock.h> template <typename T> struct TypedScalarDeviceViewTest : public cudf::test::BaseFixture {}; TYPED_TEST_CASE(TypedScalarDeviceViewTest, cudf::test::FixedWidthTypes); template <typename ScalarDeviceViewType> __global__ void test_set_value(ScalarDeviceViewType s, ScalarDeviceViewType s1) { s1.set_value(s.value()); s1.set_valid(true); } template <typename ScalarDeviceViewType> __global__ void test_value(ScalarDeviceViewType s, ScalarDeviceViewType s1, bool *result) { *result = (s.value() == s1.value()); } TYPED_TEST(TypedScalarDeviceViewTest, Value) { TypeParam value{7}; cudf::experimental::scalar_type_t<TypeParam> s(value); cudf::experimental::scalar_type_t<TypeParam> s1; auto scalar_device_view = cudf::get_scalar_device_view(s); auto scalar_device_view1 = cudf::get_scalar_device_view(s1); rmm::device_scalar<bool> result; hipLaunchKernelGGL(( test_set_value), dim3(1), dim3(1), 0, 0, scalar_device_view, scalar_device_view1); CHECK_CUDA(0); EXPECT_EQ(s1.value(), value); EXPECT_TRUE(s1.is_valid()); hipLaunchKernelGGL(( test_value), dim3(1), dim3(1), 0, 0, scalar_device_view, scalar_device_view1, result.data()); CHECK_CUDA(0); EXPECT_TRUE(result.value()); } template <typename ScalarDeviceViewType> __global__ void test_null(ScalarDeviceViewType s, bool* result) { *result = s.is_valid(); } TYPED_TEST(TypedScalarDeviceViewTest, ConstructNull) { TypeParam value = 5; cudf::experimental::scalar_type_t<TypeParam> s(value, false); auto scalar_device_view = cudf::get_scalar_device_view(s); rmm::device_scalar<bool> result; hipLaunchKernelGGL(( test_null), dim3(1), dim3(1), 0, 0, scalar_device_view, result.data()); CHECK_CUDA(0); EXPECT_FALSE(result.value()); } template <typename ScalarDeviceViewType> __global__ void test_setnull(ScalarDeviceViewType s) { s.set_valid(false); } TYPED_TEST(TypedScalarDeviceViewTest, SetNull) { cudf::experimental::scalar_type_t<TypeParam> s; auto scalar_device_view = cudf::get_scalar_device_view(s); s.set_valid(true); EXPECT_TRUE(s.is_valid()); hipLaunchKernelGGL(( test_setnull), dim3(1), dim3(1), 0, 0, scalar_device_view); CHECK_CUDA(0); EXPECT_FALSE(s.is_valid()); } struct StringScalarDeviceViewTest : public cudf::test::BaseFixture {}; __global__ void test_string_value(cudf::string_scalar_device_view s, const char* value, cudf::size_type size, bool* result) { *result = (s.value() == cudf::string_view(value, size)); } TEST_F(StringScalarDeviceViewTest, Value) { std::string value("test string"); cudf::string_scalar 
s(value); auto scalar_device_view = cudf::get_scalar_device_view(s); rmm::device_scalar<bool> result; rmm::device_vector<char> value_v(value.begin(), value.end()); hipLaunchKernelGGL(( test_string_value), dim3(1), dim3(1), 0, 0, scalar_device_view, value_v.data().get(), value.size(), result.data()); CHECK_CUDA(0); EXPECT_TRUE(result.value()); }
3fa942207f43965f9c9c3bfd9332683e9db35c14.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/scalar/scalar.hpp> #include <cudf/scalar/scalar_device_view.cuh> #include <cudf/types.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <tests/utilities/base_fixture.hpp> #include <tests/utilities/cudf_gtest.hpp> #include <tests/utilities/type_list_utilities.hpp> #include <tests/utilities/type_lists.hpp> #include <thrust/sequence.h> #include <random> #include <gmock/gmock.h> template <typename T> struct TypedScalarDeviceViewTest : public cudf::test::BaseFixture {}; TYPED_TEST_CASE(TypedScalarDeviceViewTest, cudf::test::FixedWidthTypes); template <typename ScalarDeviceViewType> __global__ void test_set_value(ScalarDeviceViewType s, ScalarDeviceViewType s1) { s1.set_value(s.value()); s1.set_valid(true); } template <typename ScalarDeviceViewType> __global__ void test_value(ScalarDeviceViewType s, ScalarDeviceViewType s1, bool *result) { *result = (s.value() == s1.value()); } TYPED_TEST(TypedScalarDeviceViewTest, Value) { TypeParam value{7}; cudf::experimental::scalar_type_t<TypeParam> s(value); cudf::experimental::scalar_type_t<TypeParam> s1; auto scalar_device_view = cudf::get_scalar_device_view(s); auto scalar_device_view1 = cudf::get_scalar_device_view(s1); rmm::device_scalar<bool> result; test_set_value<<<1, 1>>>(scalar_device_view, scalar_device_view1); CHECK_CUDA(0); EXPECT_EQ(s1.value(), value); EXPECT_TRUE(s1.is_valid()); test_value<<<1, 1>>>(scalar_device_view, scalar_device_view1, result.data()); CHECK_CUDA(0); EXPECT_TRUE(result.value()); } template <typename ScalarDeviceViewType> __global__ void test_null(ScalarDeviceViewType s, bool* result) { *result = s.is_valid(); } TYPED_TEST(TypedScalarDeviceViewTest, ConstructNull) { TypeParam value = 5; cudf::experimental::scalar_type_t<TypeParam> s(value, false); auto scalar_device_view = cudf::get_scalar_device_view(s); rmm::device_scalar<bool> result; test_null<<<1, 1>>>(scalar_device_view, result.data()); CHECK_CUDA(0); EXPECT_FALSE(result.value()); } template <typename ScalarDeviceViewType> __global__ void test_setnull(ScalarDeviceViewType s) { s.set_valid(false); } TYPED_TEST(TypedScalarDeviceViewTest, SetNull) { cudf::experimental::scalar_type_t<TypeParam> s; auto scalar_device_view = cudf::get_scalar_device_view(s); s.set_valid(true); EXPECT_TRUE(s.is_valid()); test_setnull<<<1, 1>>>(scalar_device_view); CHECK_CUDA(0); EXPECT_FALSE(s.is_valid()); } struct StringScalarDeviceViewTest : public cudf::test::BaseFixture {}; __global__ void test_string_value(cudf::string_scalar_device_view s, const char* value, cudf::size_type size, bool* result) { *result = (s.value() == cudf::string_view(value, size)); } TEST_F(StringScalarDeviceViewTest, Value) { std::string value("test string"); cudf::string_scalar s(value); auto scalar_device_view = cudf::get_scalar_device_view(s); rmm::device_scalar<bool> result; rmm::device_vector<char> value_v(value.begin(), value.end()); test_string_value<<<1, 1>>>(scalar_device_view, value_v.data().get(), 
value.size(), result.data()); CHECK_CUDA(0); EXPECT_TRUE(result.value()); }
3860fcc8bb79f2163a4ccd830b28b192745c2570.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cavs/backend/functor_elementwise.h" #include "cavs/backend/cuda_common.h" #include "cavs/backend/op_impl.h" #include "cavs/midend/tensor.h" #include "cavs/midend/op_context.h" #include "cavs/proto/tensor_shape.pb.h" #include "cavs/util/macros_gpu.h" namespace backend { using ::midend::OpContext; using ::midend::Tensor; template <typename T> __global__ void SGDKernel(T* out, const T* inp0, const T* inp1, const float lr,size_t n) { CUDA_1D_KERNEL_LOOP(i, n) { out[i] = inp0[i] - lr*inp1[i]; } } template <typename T> class SGDOpImpl : public OpImpl { public: explicit SGDOpImpl(const OpDef& def) : OpImpl(def), lr_(0.f) { lr_ = GetSingleArg<float>(def, "Learning_rate"); VLOG(V_DEBUG) << "learning_rate = " << lr_; /*count_ = 0;*/ } void Compute(OpContext* context) override { /*if (count_++ > 2293*30 && count_ % 2293 == 0) {*/ /*lr_ /= 2;*/ /*LOG(INFO) << "Lr Changed to " << lr_;*/ /*}*/ const Tensor& inp0 = context->Input(0); const Tensor& inp1 = context->Input(1); inp0.DebugNumerical<T>(); inp1.DebugNumerical<T>(); Tensor* out = context->Output(0); int n = out->count(); hipLaunchKernelGGL(( SGDKernel<T>), dim3(BLOCKS_PER_GRID(n)), dim3(THREADS_PER_BLOCK), 0, 0, out->mutable_data<T>(), inp0.data<T>(), inp1.data<T>(), lr_, n); out->DebugNumerical<T>(); } private: float lr_; /*int count_;*/ }; REGISTER_OP_IMPL_BUILDER(Key("SGD").Device("GPU"), SGDOpImpl<float>); } //namespace backend
3860fcc8bb79f2163a4ccd830b28b192745c2570.cu
#include "cavs/backend/functor_elementwise.h" #include "cavs/backend/cuda_common.h" #include "cavs/backend/op_impl.h" #include "cavs/midend/tensor.h" #include "cavs/midend/op_context.h" #include "cavs/proto/tensor_shape.pb.h" #include "cavs/util/macros_gpu.h" namespace backend { using ::midend::OpContext; using ::midend::Tensor; template <typename T> __global__ void SGDKernel(T* out, const T* inp0, const T* inp1, const float lr,size_t n) { CUDA_1D_KERNEL_LOOP(i, n) { out[i] = inp0[i] - lr*inp1[i]; } } template <typename T> class SGDOpImpl : public OpImpl { public: explicit SGDOpImpl(const OpDef& def) : OpImpl(def), lr_(0.f) { lr_ = GetSingleArg<float>(def, "Learning_rate"); VLOG(V_DEBUG) << "learning_rate = " << lr_; /*count_ = 0;*/ } void Compute(OpContext* context) override { /*if (count_++ > 2293*30 && count_ % 2293 == 0) {*/ /*lr_ /= 2;*/ /*LOG(INFO) << "Lr Changed to " << lr_;*/ /*}*/ const Tensor& inp0 = context->Input(0); const Tensor& inp1 = context->Input(1); inp0.DebugNumerical<T>(); inp1.DebugNumerical<T>(); Tensor* out = context->Output(0); int n = out->count(); SGDKernel<T><<<BLOCKS_PER_GRID(n), THREADS_PER_BLOCK>>> ( out->mutable_data<T>(), inp0.data<T>(), inp1.data<T>(), lr_, n); out->DebugNumerical<T>(); } private: float lr_; /*int count_;*/ }; REGISTER_OP_IMPL_BUILDER(Key("SGD").Device("GPU"), SGDOpImpl<float>); } //namespace backend
26fc7aab36c533d379ced734aa5c5a52c92201c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*author: Shikun Wang*/ #include <stdio.h> #include "timerc.h" __global__ void hello(int size, int depth) { printf("block: %d thread: %d depth: %d\n",blockIdx.x,threadIdx.x,depth); if(size==1) return; if(threadIdx.x==0&&blockIdx.x==0) {printf("callling from depth %d\n", depth);hipLaunchKernelGGL(( hello), dim3(2),dim3(size/2), 0, 0, size/2,depth+1); } hipDeviceSynchronize(); printf("finish from depth %d \n",depth); } __global__ void naive_recursive_sum(int* v,int n,int start) { if(n==1) { if(threadIdx.x==0) v[start]=v[start]+v[start+n]; } if(n>=2) { hipLaunchKernelGGL(( naive_recursive_sum), dim3(1),dim3(2), 0, 0, v,n/2,start+threadIdx.x*n); __syncthreads(); if(threadIdx.x==0) { hipDeviceSynchronize(); v[start]+=v[start+n]; } } } //better recursive sum __global__ void better_recursive_sum(int* v_input,int* v_output, int n, int flag) { //store the original block numbers the thread is in if (n ==64){ flag = blockIdx.x; } //use for recursion hodler int* v_input_fixed=v_input+blockIdx.x*n; //bash case: add the two value and store in the ouput[blockID] if(n==2 && threadIdx.x==0){ v_output[flag] = v_input_fixed[0] + v_input_fixed[1]; }else{ //copy the element from nextblock by adding up int s=n/2; if(threadIdx.x<s){ v_input_fixed[threadIdx.x]+=v_input_fixed[threadIdx.x+s]; } //make sure threads are all finished copying __syncthreads(); //thread 0 call another kernel recursively if(threadIdx.x==0) { hipLaunchKernelGGL(( better_recursive_sum), dim3(1),dim3(s/2), 0, 0, v_input_fixed,v_output, n/2, flag); } } } int main() { //initialzied array int k = 10; hipDeviceSetLimit(hipLimitDevRuntimeSyncDepth, 1024); int n = 1<<(k); int blocksize = 32; int numberofblock = (n+blocksize-1) / blocksize; //print correct answer printf("expected sum: %d\n",n); float gpu2Time; float cpu2Time; //prepare input int * host_v= (int *) malloc(sizeof(int) * n); int * device_v; hipMalloc((void**) &device_v, sizeof(int) * n); for(int i=0 ;i<n; i++) host_v[i] = 1; gstart(); //prepare output buffer int * host_output = (int *) malloc(sizeof(int)*numberofblock); int * dev_output; hipMalloc((void **) &dev_output, sizeof(int) * numberofblock); hipMemcpy(device_v, host_v, sizeof(int)*n,hipMemcpyHostToDevice); //call kernel //naive_recursive_sum<<<1,2>>>(device_v,n/2,0); hipLaunchKernelGGL(( better_recursive_sum), dim3(numberofblock), dim3(blocksize), 0, 0, device_v,dev_output, 64, 0); hipMemcpy(host_output, dev_output, sizeof(int) * numberofblock, hipMemcpyDeviceToHost); gend(&gpu2Time); /*naive finisher*/ cstart(); int total = 0; //hipMemcpy(&total,device_v,sizeof(int),hipMemcpyDeviceToHost); for (int i = 0; i < numberofblock; i++){ total+= host_output[i]; } printf("my answer is %d \n",total); cend(&cpu2Time); printf("The gputime using less naive is %f, and the CPU completion time is %f\n", gpu2Time, cpu2Time); hipDeviceSynchronize(); return 0; }
26fc7aab36c533d379ced734aa5c5a52c92201c9.cu
/*author: Shikun Wang*/ #include <stdio.h> #include "timerc.h" __global__ void hello(int size, int depth) { printf("block: %d thread: %d depth: %d\n",blockIdx.x,threadIdx.x,depth); if(size==1) return; if(threadIdx.x==0&&blockIdx.x==0) {printf("callling from depth %d\n", depth); hello<<<2,size/2>>>(size/2,depth+1); } cudaDeviceSynchronize(); printf("finish from depth %d \n",depth); } __global__ void naive_recursive_sum(int* v,int n,int start) { if(n==1) { if(threadIdx.x==0) v[start]=v[start]+v[start+n]; } if(n>=2) { naive_recursive_sum<<<1,2>>>(v,n/2,start+threadIdx.x*n); __syncthreads(); if(threadIdx.x==0) { cudaDeviceSynchronize(); v[start]+=v[start+n]; } } } //better recursive sum __global__ void better_recursive_sum(int* v_input,int* v_output, int n, int flag) { //store the original block numbers the thread is in if (n ==64){ flag = blockIdx.x; } //use for recursion hodler int* v_input_fixed=v_input+blockIdx.x*n; //bash case: add the two value and store in the ouput[blockID] if(n==2 && threadIdx.x==0){ v_output[flag] = v_input_fixed[0] + v_input_fixed[1]; }else{ //copy the element from nextblock by adding up int s=n/2; if(threadIdx.x<s){ v_input_fixed[threadIdx.x]+=v_input_fixed[threadIdx.x+s]; } //make sure threads are all finished copying __syncthreads(); //thread 0 call another kernel recursively if(threadIdx.x==0) { better_recursive_sum<<<1,s/2>>>(v_input_fixed,v_output, n/2, flag); } } } int main() { //initialzied array int k = 10; cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, 1024); int n = 1<<(k); int blocksize = 32; int numberofblock = (n+blocksize-1) / blocksize; //print correct answer printf("expected sum: %d\n",n); float gpu2Time; float cpu2Time; //prepare input int * host_v= (int *) malloc(sizeof(int) * n); int * device_v; cudaMalloc((void**) &device_v, sizeof(int) * n); for(int i=0 ;i<n; i++) host_v[i] = 1; gstart(); //prepare output buffer int * host_output = (int *) malloc(sizeof(int)*numberofblock); int * dev_output; cudaMalloc((void **) &dev_output, sizeof(int) * numberofblock); cudaMemcpy(device_v, host_v, sizeof(int)*n,cudaMemcpyHostToDevice); //call kernel //naive_recursive_sum<<<1,2>>>(device_v,n/2,0); better_recursive_sum<<<numberofblock, blocksize>>>(device_v,dev_output, 64, 0); cudaMemcpy(host_output, dev_output, sizeof(int) * numberofblock, cudaMemcpyDeviceToHost); gend(&gpu2Time); /*naive finisher*/ cstart(); int total = 0; //cudaMemcpy(&total,device_v,sizeof(int),cudaMemcpyDeviceToHost); for (int i = 0; i < numberofblock; i++){ total+= host_output[i]; } printf("my answer is %d \n",total); cend(&cpu2Time); printf("The gputime using less naive is %f, and the CPU completion time is %f\n", gpu2Time, cpu2Time); cudaDeviceSynchronize(); return 0; }
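The recursive-sum pair above exercises CUDA dynamic parallelism: a __global__ function launching another kernel from the device, which hipify rewrites to a device-side hipLaunchKernelGGL call. Below is a minimal, hypothetical sketch of that pattern (parent/child names, depths, and sizes are illustrative); it must be built with relocatable device code.

// Minimal sketch of a device-side (dynamic-parallelism) launch, the feature the
// recursive kernels above rely on; names, depths, and sizes are illustrative.
// Build with relocatable device code, e.g.: nvcc -rdc=true -lcudadevrt dp.cu
#include <cuda_runtime.h>
#include <cstdio>

__global__ void child(int depth) {
    printf("child: depth %d, thread %d\n", depth, threadIdx.x);
}

__global__ void parent(int depth) {
    if (threadIdx.x == 0) {
        // Device-side launch; hipify rewrites this to a device-side
        // hipLaunchKernelGGL(child, dim3(1), dim3(4), 0, 0, depth + 1) call.
        child<<<1, 4>>>(depth + 1);
        // A parent grid is not considered complete until its child grids
        // finish, so the host-side synchronize below also covers the child.
    }
}

int main() {
    parent<<<1, 1>>>(0);
    cudaDeviceSynchronize();
    return 0;
}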
aaa93a31ae92926824b0149b4e13d81a17d477af.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "mandelbrot.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *A = NULL; hipMalloc(&A, XSIZE*YSIZE); const int N = 1; const int largeur = 1; const int hauteur = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( mandelbrot), dim3(gridBlock),dim3(threadBlock), 0, 0, A,N,largeur,hauteur); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( mandelbrot), dim3(gridBlock),dim3(threadBlock), 0, 0, A,N,largeur,hauteur); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( mandelbrot), dim3(gridBlock),dim3(threadBlock), 0, 0, A,N,largeur,hauteur); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
aaa93a31ae92926824b0149b4e13d81a17d477af.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "mandelbrot.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); const int N = 1; const int largeur = 1; const int hauteur = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); mandelbrot<<<gridBlock,threadBlock>>>(A,N,largeur,hauteur); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { mandelbrot<<<gridBlock,threadBlock>>>(A,N,largeur,hauteur); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { mandelbrot<<<gridBlock,threadBlock>>>(A,N,largeur,hauteur); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
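Editor's sketch (assumption, not part of the generated benchmark above): the harness times the 1000-launch loop with std::chrono around cudaDeviceSynchronize; an alternative is CUDA event timing, where the elapsed time is measured on the GPU between two recorded events. Variable names reuse those from the harness.
cudaEvent_t ev_start, ev_stop;
cudaEventCreate(&ev_start);
cudaEventCreate(&ev_stop);
cudaEventRecord(ev_start);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    mandelbrot<<<gridBlock, threadBlock>>>(A, N, largeur, hauteur);
}
cudaEventRecord(ev_stop);
cudaEventSynchronize(ev_stop);
float ms = 0.f;
cudaEventElapsedTime(&ms, ev_start, ev_stop);   // milliseconds across the 1000 launches
cudaEventDestroy(ev_start);
cudaEventDestroy(ev_stop);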
c62427b20a184111adba95840b282c85f7a02aa6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include "png_util.h" #define min(X,Y) ((X) < (Y) ? (X) : (Y)) #define max(X,Y) ((X) > (Y) ? (X) : (Y)) #define CUDA_CALL(x) {hipError_t cuda_error__ = (x); if (cuda_error__) printf("CUDA error: " #x " returned \"%s\"\n", hipGetErrorString(cuda_error__));} #define BLOCKDIM 1024 __global__ void array_setup(int nx, int ny, double dx, double dy, double* d_z, double* d_v, double* d_a) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < nx * ny) { int row = i / nx; int col = i % nx; double x = (double)col*dx; double y = (double)row*dy; d_z[i] = exp(-(sqrt((x-5.0)*(x-5.0)+(y-5.0)*(y-5.0)))); d_v[i] = 0.0; d_a[i] = 0.0; } } __global__ void accel_update(int nx, int ny, double dx2inv, double dy2inv, double* d_z, double* d_a) { int i = blockDim.x * blockIdx.x + threadIdx.x; double ax, ay; int r = i / nx; int c = i % nx; if(i < nx*ny) { if(r<ny-1 && r>0 && c<nx-1 && c>0){ ax = (d_z[i+nx]+d_z[i-nx]-2.0*d_z[i])*dx2inv; ay = (d_z[i+1]+d_z[i-1]-2.0*d_z[i])*dy2inv; d_a[i] = (ax+ay)/2; } else d_a[i] = 0.0; } } __global__ void pos_update(int nx, int ny, double dt, double* d_z, double* d_v, double* d_a) { int i = blockDim.x * blockIdx.x + threadIdx.x; int r = i / nx; int c = i % nx; if(r<ny-1 && r>0 && c<nx-1 && c>0){ d_v[i] = d_v[i] + dt*d_a[i]; d_z[i] = d_z[i] + dt*d_v[i]; } } int main(int argc, char ** argv) { int nx = 500; int ny = 500; int N = nx * ny; int nt = 10000; int frame=0; //int nt = 1000000; int i,it; double dx,dy,dt; double max,min; double tmax; double dx2inv, dy2inv; char filename[sizeof "./images/cuda00000.png"]; double *d_z, *d_v, *d_a, *d_output; image_size_t sz; sz.width=nx; sz.height=ny; int mem_size = nx * ny * sizeof(double); //make mesh double * h_z = (double *) malloc(mem_size); //Velocity double * h_v = (double *) malloc(mem_size); //Accelleration double * h_a = (double *) malloc(mem_size); //output image unsigned char * o_img = (unsigned char *) malloc(sz.width*sz.height*sizeof(unsigned char)); max=10.0; min=0.0; dx = (max-min)/(double)(nx-1); dy = (max-min)/(double)(ny-1); tmax=20.0; dt= (tmax-0.0)/(double)(nt-1); CUDA_CALL(hipMalloc((void **)&d_z, mem_size)); CUDA_CALL(hipMalloc((void **)&d_v, mem_size)); CUDA_CALL(hipMalloc((void **)&d_a, mem_size)); CUDA_CALL(hipMalloc((void **)&d_output, sz.width*sz.height*sizeof(unsigned char))); int block_size = BLOCKDIM; int num_blocks = N / block_size + 1; dim3 dimBlock(block_size, 1, 1); dim3 dimGrid(num_blocks, 1, 1); hipLaunchKernelGGL(( array_setup), dim3(dimGrid), dim3(dimBlock), 0, 0, nx, ny, dx, dy, d_z, d_v, d_a); dx2inv=1.0/(dx*dx); dy2inv=1.0/(dy*dy); for(it=0;it<nt-1;it++) { hipLaunchKernelGGL(( accel_update), dim3(dimGrid), dim3(dimBlock), 0, 0, nx, ny, dx2inv, dy2inv, d_z, d_a); hipLaunchKernelGGL(( pos_update), dim3(dimGrid), dim3(dimBlock), 0, 0, nx, ny, dt, d_z, d_v, d_a); if (it % 100 ==0) { CUDA_CALL(hipMemcpy(h_z, d_z, mem_size, hipMemcpyDeviceToHost)); double mx,mn; mx = -999999; mn = 999999; for(i=0; i<N; ++i) { mx = max(mx, h_z[i]); mn = min(mn, h_z[i]); } for(i=0; i<N; ++i) { o_img[i] = (char) round((h_z[i]-mn)/(mx-mn)*255); } sprintf(filename, "./images/cuda%05d.png", frame); printf("Writing %s\n",filename); write_png_file(filename,o_img,sz); frame+=1; } } CUDA_CALL(hipMemcpy(h_z, d_z, mem_size, hipMemcpyDeviceToHost)); double mx,mn; mx = -999999; mn = 999999; for(i=0; i<N; ++i) { mx = max(mx, h_z[i]); mn = min(mn, h_z[i]); } for(i=0; i<N; ++i) { o_img[i] = (char) 
round((h_z[i]-mn)/(mx-mn)*255); } sprintf(filename, "./images/cuda%05d.png", it); printf("Writing %s\n",filename); //Write out output image using 1D serial pointer write_png_file(filename,o_img,sz); return 0; }
c62427b20a184111adba95840b282c85f7a02aa6.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include "png_util.h" #define min(X,Y) ((X) < (Y) ? (X) : (Y)) #define max(X,Y) ((X) > (Y) ? (X) : (Y)) #define CUDA_CALL(x) {cudaError_t cuda_error__ = (x); if (cuda_error__) printf("CUDA error: " #x " returned \"%s\"\n", cudaGetErrorString(cuda_error__));} #define BLOCKDIM 1024 __global__ void array_setup(int nx, int ny, double dx, double dy, double* d_z, double* d_v, double* d_a) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < nx * ny) { int row = i / nx; int col = i % nx; double x = (double)col*dx; double y = (double)row*dy; d_z[i] = exp(-(sqrt((x-5.0)*(x-5.0)+(y-5.0)*(y-5.0)))); d_v[i] = 0.0; d_a[i] = 0.0; } } __global__ void accel_update(int nx, int ny, double dx2inv, double dy2inv, double* d_z, double* d_a) { int i = blockDim.x * blockIdx.x + threadIdx.x; double ax, ay; int r = i / nx; int c = i % nx; if(i < nx*ny) { if(r<ny-1 && r>0 && c<nx-1 && c>0){ ax = (d_z[i+nx]+d_z[i-nx]-2.0*d_z[i])*dx2inv; ay = (d_z[i+1]+d_z[i-1]-2.0*d_z[i])*dy2inv; d_a[i] = (ax+ay)/2; } else d_a[i] = 0.0; } } __global__ void pos_update(int nx, int ny, double dt, double* d_z, double* d_v, double* d_a) { int i = blockDim.x * blockIdx.x + threadIdx.x; int r = i / nx; int c = i % nx; if(r<ny-1 && r>0 && c<nx-1 && c>0){ d_v[i] = d_v[i] + dt*d_a[i]; d_z[i] = d_z[i] + dt*d_v[i]; } } int main(int argc, char ** argv) { int nx = 500; int ny = 500; int N = nx * ny; int nt = 10000; int frame=0; //int nt = 1000000; int i,it; double dx,dy,dt; double max,min; double tmax; double dx2inv, dy2inv; char filename[sizeof "./images/cuda00000.png"]; double *d_z, *d_v, *d_a, *d_output; image_size_t sz; sz.width=nx; sz.height=ny; int mem_size = nx * ny * sizeof(double); //make mesh double * h_z = (double *) malloc(mem_size); //Velocity double * h_v = (double *) malloc(mem_size); //Accelleration double * h_a = (double *) malloc(mem_size); //output image unsigned char * o_img = (unsigned char *) malloc(sz.width*sz.height*sizeof(unsigned char)); max=10.0; min=0.0; dx = (max-min)/(double)(nx-1); dy = (max-min)/(double)(ny-1); tmax=20.0; dt= (tmax-0.0)/(double)(nt-1); CUDA_CALL(cudaMalloc((void **)&d_z, mem_size)); CUDA_CALL(cudaMalloc((void **)&d_v, mem_size)); CUDA_CALL(cudaMalloc((void **)&d_a, mem_size)); CUDA_CALL(cudaMalloc((void **)&d_output, sz.width*sz.height*sizeof(unsigned char))); int block_size = BLOCKDIM; int num_blocks = N / block_size + 1; dim3 dimBlock(block_size, 1, 1); dim3 dimGrid(num_blocks, 1, 1); array_setup<<<dimGrid, dimBlock>>>(nx, ny, dx, dy, d_z, d_v, d_a); dx2inv=1.0/(dx*dx); dy2inv=1.0/(dy*dy); for(it=0;it<nt-1;it++) { accel_update<<<dimGrid, dimBlock>>>(nx, ny, dx2inv, dy2inv, d_z, d_a); pos_update<<<dimGrid, dimBlock>>>(nx, ny, dt, d_z, d_v, d_a); if (it % 100 ==0) { CUDA_CALL(cudaMemcpy(h_z, d_z, mem_size, cudaMemcpyDeviceToHost)); double mx,mn; mx = -999999; mn = 999999; for(i=0; i<N; ++i) { mx = max(mx, h_z[i]); mn = min(mn, h_z[i]); } for(i=0; i<N; ++i) { o_img[i] = (char) round((h_z[i]-mn)/(mx-mn)*255); } sprintf(filename, "./images/cuda%05d.png", frame); printf("Writing %s\n",filename); write_png_file(filename,o_img,sz); frame+=1; } } CUDA_CALL(cudaMemcpy(h_z, d_z, mem_size, cudaMemcpyDeviceToHost)); double mx,mn; mx = -999999; mn = 999999; for(i=0; i<N; ++i) { mx = max(mx, h_z[i]); mn = min(mn, h_z[i]); } for(i=0; i<N; ++i) { o_img[i] = (char) round((h_z[i]-mn)/(mx-mn)*255); } sprintf(filename, "./images/cuda%05d.png", it); printf("Writing %s\n",filename); //Write out output image using 1D serial pointer 
write_png_file(filename,o_img,sz); return 0; }
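Editor's note: written out for one interior cell i (row = i/nx, col = i%nx), the update performed each time step by accel_update followed by pos_update above is
// ax   = (z[i+nx] + z[i-nx] - 2*z[i]) * dx2inv;   // central difference across rows
// ay   = (z[i+1]  + z[i-1]  - 2*z[i]) * dy2inv;   // central difference across columns
// a[i] = (ax + ay) / 2;
// v[i] += dt * a[i];        // velocity first
// z[i] += dt * v[i];        // then position uses the updated velocity (semi-implicit Euler)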
a1f1e5cec6a7ba7ab7e2a4019ee829c8675c34ce.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* reduces.cu * Ernest Yeung * [email protected] * Demonstrates reduce with parallel and serial implementations * with CUDA C/C++ and global memory * * */ #include "reduces.h" // parallel implementations __global__ void global_reduce_kernel( float * d_in, float * d_out, const int L ) { int k_x = threadIdx.x + blockDim.x * blockIdx.x ; int tid = threadIdx.x ; if (k_x >= L) { return; } // do reduction in global mem for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { if ( (k_x +s) < L ) { d_in[k_x] += d_in[k_x + s]; } } __syncthreads(); // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = d_in[k_x]; } } __global__ void shmem_reduce_kernel(const float * d_in, float * d_out) { // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ float sdata[]; int k_x = threadIdx.x + blockDim.x * blockIdx.x ; int tid = threadIdx.x ; // load shared mem from global mem sdata[tid] = d_in[k_x] ; __syncthreads(); // make sure entire block is loaded! // do reduction in shared mem int M_x = blockDim.x; // M_x := total number of threads in a (single thread) block, ARRAY_SIZE = 32 in this case for (unsigned int s = M_x >> 1; s > 0; s >>= 1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads() ; // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = sdata[0]; } } __global__ void shmem_reduce_max_kernel( const float * d_in, float * d_out) { // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ float sdata[]; int k_x = threadIdx.x + blockDim.x * blockIdx.x ; int tid = threadIdx.x ; // load shared mem from global mem sdata[tid] = d_in[k_x] ; __syncthreads(); // make sure entire block is loaded! // do reduction in shared mem int M_x = blockDim.x; // M_x := total number of threads in a (single thread) block, ARRAY_SIZE = 32 in this case for (unsigned int s = M_x >> 1; s > 0; s >>= 1) { if (tid < s) { sdata[tid] = fmaxf( sdata[tid], sdata[tid + s] ) ; } __syncthreads() ; // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = sdata[0]; } } __global__ void shmem_reduce_min_kernel( const float * d_in, float * d_out) { // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ float sdata[]; int k_x = threadIdx.x + blockDim.x * blockIdx.x ; int tid = threadIdx.x ; // load shared mem from global mem sdata[tid] = d_in[k_x] ; __syncthreads(); // make sure entire block is loaded! // do reduction in shared mem int M_x = blockDim.x; // M_x := total number of threads in a (single thread) block, ARRAY_SIZE = 32 in this case for (unsigned int s = M_x >> 1; s > 0; s >>= 1) { if (tid < s) { sdata[tid] = fminf( sdata[tid], sdata[tid + s] ) ; } __syncthreads() ; // make sure all adds at one stage are done! 
} // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = sdata[0]; } } void reduce_global(float * d_in, float * out, const int L, int M_in) { int N_x { ( L + M_in - 1)/ M_in } ; int M_x { M_in }; // declare GPU memory pointers float *dev_intermediate, *dev_out; // allocate GPU memory checkCudaErrors( hipMalloc((void **) &dev_out, sizeof(float)) ); checkCudaErrors( hipMalloc((void **) &dev_intermediate, N_x * sizeof(float)) ); hipLaunchKernelGGL(( global_reduce_kernel), dim3(N_x), dim3(M_x), 0, 0, d_in, dev_intermediate, L ) ; // now we're down to one block left, so reduce it M_x = N_x; N_x = 1; hipLaunchKernelGGL(( global_reduce_kernel), dim3(N_x),dim3(M_x), 0, 0, dev_intermediate, dev_out, M_x) ; // copy our results from device to host checkCudaErrors( hipMemcpy( out, dev_out, sizeof(float), hipMemcpyDeviceToHost) ); hipFree( dev_out ); hipFree( dev_intermediate ); } void reduce_shmem(float * d_in, float * out, const int L, int M_in) //void reduce_shmem(float * d_in, float & out, const int L, int M_in) { int N_x { ( L + M_in - 1)/ M_in } ; int M_x { M_in }; // declare GPU memory pointers float *dev_intermediate, *dev_out; // allocate GPU memory checkCudaErrors( hipMalloc((void **) &dev_out, sizeof(float)) ); checkCudaErrors( hipMalloc((void **) &dev_intermediate, N_x * sizeof(float)) ); hipLaunchKernelGGL(( shmem_reduce_kernel), dim3(N_x), dim3(M_x), M_x*sizeof(float), 0, d_in, dev_intermediate ) ; // now we're down to one block left, so reduce it M_x = N_x; N_x = 1; hipLaunchKernelGGL(( shmem_reduce_kernel), dim3(N_x),dim3(M_x), M_x*sizeof(float), 0, dev_intermediate, dev_out ) ; // copy our results from device to host checkCudaErrors( hipMemcpy( out, dev_out, sizeof(float), hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree( dev_out ) ); checkCudaErrors( hipFree( dev_intermediate ) ); } void reduce_shmem_max(float * d_in, float * out, const int L, int M_in) { int N_x { ( L + M_in - 1)/ M_in } ; int M_x { M_in }; // declare GPU memory pointers float *dev_intermediate, *dev_out; // allocate GPU memory checkCudaErrors( hipMalloc((void **) &dev_out, sizeof(float)) ); checkCudaErrors( hipMalloc((void **) &dev_intermediate, N_x * sizeof(float)) ); hipLaunchKernelGGL(( shmem_reduce_max_kernel), dim3(N_x), dim3(M_x), M_x*sizeof(float), 0, d_in, dev_intermediate ) ; // now we're down to one block left, so reduce it M_x = N_x; N_x = 1; hipLaunchKernelGGL(( shmem_reduce_max_kernel), dim3(N_x),dim3(M_x), M_x*sizeof(float), 0, dev_intermediate, dev_out ) ; // copy our results from device to host checkCudaErrors( hipMemcpy( out, dev_out, sizeof(float), hipMemcpyDeviceToHost) ); checkCudaErrors( hipFree( dev_out ) ); checkCudaErrors( hipFree( dev_intermediate ) ); } void reduce_shmem_min(float * d_in, float * out, const int L, int M_in) { int N_x { ( L + M_in - 1)/ M_in } ; int M_x { M_in }; // declare GPU memory pointers float *dev_intermediate, *dev_out; // allocate GPU memory checkCudaErrors( hipMalloc((void **) &dev_out, sizeof(float)) ); checkCudaErrors( hipMalloc((void **) &dev_intermediate, N_x * sizeof(float)) ); hipLaunchKernelGGL(( shmem_reduce_min_kernel), dim3(N_x), dim3(M_x), M_x*sizeof(float), 0, d_in, dev_intermediate ) ; // now we're down to one block left, so reduce it M_x = N_x; N_x = 1; hipLaunchKernelGGL(( shmem_reduce_min_kernel), dim3(N_x),dim3(M_x), M_x*sizeof(float), 0, dev_intermediate, dev_out ) ; // copy our results from device to host checkCudaErrors( hipMemcpy( out, dev_out, sizeof(float), hipMemcpyDeviceToHost) ); 
checkCudaErrors( hipFree( dev_out ) ); checkCudaErrors( hipFree( dev_intermediate ) ); }
a1f1e5cec6a7ba7ab7e2a4019ee829c8675c34ce.cu
/* reduces.cu * Ernest Yeung * [email protected] * Demonstrates reduce with parallel and serial implementations * with CUDA C/C++ and global memory * * */ #include "reduces.h" // parallel implementations __global__ void global_reduce_kernel( float * d_in, float * d_out, const int L ) { int k_x = threadIdx.x + blockDim.x * blockIdx.x ; int tid = threadIdx.x ; if (k_x >= L) { return; } // do reduction in global mem for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { if ( (k_x +s) < L ) { d_in[k_x] += d_in[k_x + s]; } } __syncthreads(); // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = d_in[k_x]; } } __global__ void shmem_reduce_kernel(const float * d_in, float * d_out) { // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ float sdata[]; int k_x = threadIdx.x + blockDim.x * blockIdx.x ; int tid = threadIdx.x ; // load shared mem from global mem sdata[tid] = d_in[k_x] ; __syncthreads(); // make sure entire block is loaded! // do reduction in shared mem int M_x = blockDim.x; // M_x := total number of threads in a (single thread) block, ARRAY_SIZE = 32 in this case for (unsigned int s = M_x >> 1; s > 0; s >>= 1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads() ; // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = sdata[0]; } } __global__ void shmem_reduce_max_kernel( const float * d_in, float * d_out) { // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ float sdata[]; int k_x = threadIdx.x + blockDim.x * blockIdx.x ; int tid = threadIdx.x ; // load shared mem from global mem sdata[tid] = d_in[k_x] ; __syncthreads(); // make sure entire block is loaded! // do reduction in shared mem int M_x = blockDim.x; // M_x := total number of threads in a (single thread) block, ARRAY_SIZE = 32 in this case for (unsigned int s = M_x >> 1; s > 0; s >>= 1) { if (tid < s) { sdata[tid] = fmaxf( sdata[tid], sdata[tid + s] ) ; } __syncthreads() ; // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = sdata[0]; } } __global__ void shmem_reduce_min_kernel( const float * d_in, float * d_out) { // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ float sdata[]; int k_x = threadIdx.x + blockDim.x * blockIdx.x ; int tid = threadIdx.x ; // load shared mem from global mem sdata[tid] = d_in[k_x] ; __syncthreads(); // make sure entire block is loaded! // do reduction in shared mem int M_x = blockDim.x; // M_x := total number of threads in a (single thread) block, ARRAY_SIZE = 32 in this case for (unsigned int s = M_x >> 1; s > 0; s >>= 1) { if (tid < s) { sdata[tid] = fminf( sdata[tid], sdata[tid + s] ) ; } __syncthreads() ; // make sure all adds at one stage are done! 
} // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = sdata[0]; } } void reduce_global(float * d_in, float * out, const int L, int M_in) { int N_x { ( L + M_in - 1)/ M_in } ; int M_x { M_in }; // declare GPU memory pointers float *dev_intermediate, *dev_out; // allocate GPU memory checkCudaErrors( cudaMalloc((void **) &dev_out, sizeof(float)) ); checkCudaErrors( cudaMalloc((void **) &dev_intermediate, N_x * sizeof(float)) ); global_reduce_kernel<<<N_x, M_x>>>( d_in, dev_intermediate, L ) ; // now we're down to one block left, so reduce it M_x = N_x; N_x = 1; global_reduce_kernel<<<N_x,M_x>>>( dev_intermediate, dev_out, M_x) ; // copy our results from device to host checkCudaErrors( cudaMemcpy( out, dev_out, sizeof(float), cudaMemcpyDeviceToHost) ); cudaFree( dev_out ); cudaFree( dev_intermediate ); } void reduce_shmem(float * d_in, float * out, const int L, int M_in) //void reduce_shmem(float * d_in, float & out, const int L, int M_in) { int N_x { ( L + M_in - 1)/ M_in } ; int M_x { M_in }; // declare GPU memory pointers float *dev_intermediate, *dev_out; // allocate GPU memory checkCudaErrors( cudaMalloc((void **) &dev_out, sizeof(float)) ); checkCudaErrors( cudaMalloc((void **) &dev_intermediate, N_x * sizeof(float)) ); shmem_reduce_kernel<<<N_x, M_x, M_x*sizeof(float)>>>( d_in, dev_intermediate ) ; // now we're down to one block left, so reduce it M_x = N_x; N_x = 1; shmem_reduce_kernel<<<N_x,M_x, M_x*sizeof(float)>>>( dev_intermediate, dev_out ) ; // copy our results from device to host checkCudaErrors( cudaMemcpy( out, dev_out, sizeof(float), cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree( dev_out ) ); checkCudaErrors( cudaFree( dev_intermediate ) ); } void reduce_shmem_max(float * d_in, float * out, const int L, int M_in) { int N_x { ( L + M_in - 1)/ M_in } ; int M_x { M_in }; // declare GPU memory pointers float *dev_intermediate, *dev_out; // allocate GPU memory checkCudaErrors( cudaMalloc((void **) &dev_out, sizeof(float)) ); checkCudaErrors( cudaMalloc((void **) &dev_intermediate, N_x * sizeof(float)) ); shmem_reduce_max_kernel<<<N_x, M_x, M_x*sizeof(float)>>>( d_in, dev_intermediate ) ; // now we're down to one block left, so reduce it M_x = N_x; N_x = 1; shmem_reduce_max_kernel<<<N_x,M_x, M_x*sizeof(float)>>>( dev_intermediate, dev_out ) ; // copy our results from device to host checkCudaErrors( cudaMemcpy( out, dev_out, sizeof(float), cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree( dev_out ) ); checkCudaErrors( cudaFree( dev_intermediate ) ); } void reduce_shmem_min(float * d_in, float * out, const int L, int M_in) { int N_x { ( L + M_in - 1)/ M_in } ; int M_x { M_in }; // declare GPU memory pointers float *dev_intermediate, *dev_out; // allocate GPU memory checkCudaErrors( cudaMalloc((void **) &dev_out, sizeof(float)) ); checkCudaErrors( cudaMalloc((void **) &dev_intermediate, N_x * sizeof(float)) ); shmem_reduce_min_kernel<<<N_x, M_x, M_x*sizeof(float)>>>( d_in, dev_intermediate ) ; // now we're down to one block left, so reduce it M_x = N_x; N_x = 1; shmem_reduce_min_kernel<<<N_x,M_x, M_x*sizeof(float)>>>( dev_intermediate, dev_out ) ; // copy our results from device to host checkCudaErrors( cudaMemcpy( out, dev_out, sizeof(float), cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaFree( dev_out ) ); checkCudaErrors( cudaFree( dev_intermediate ) ); }
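Editor's sketch (hypothetical driver, not part of reduces.cu): one way the two-pass reduce_shmem above might be invoked. Sizes are chosen so the kernels' implicit assumptions hold: L is a multiple of M_in, both are powers of two, and the number of first-pass blocks N_x = L / M_in fits in a single block for the second pass.
const int L    = 1 << 16;   // 65536 input floats
const int M_in = 256;       // threads per block -> 256 intermediate sums
float *d_in = nullptr;
cudaMalloc((void **) &d_in, L * sizeof(float));
// ... fill d_in, e.g. cudaMemcpy from a host array of L floats ...
float sum = 0.f;
reduce_shmem(d_in, &sum, L, M_in);   // two-pass shared-memory reduction
cudaFree(d_in);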
cd4e8c80b3fcd79ba72a8d54c5d89c5c20d084f8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 dsymv_upper.cu is nearly identical to dsymv_upper.cu, just change names and drop MAGMA_D_CONJ. dsymv_kernel_U (upper) in dsymv_upper.cu is very similar to dsymv_kernel_L (lower) in dsymv.cu; diff the two files to compare. @generated from magmablas/zhemv_upper.cu, normal z -> d, Sun Nov 20 20:20:28 2016 @author Mark Gates */ #include "magma_internal.h" #include "commonblas_d.h" #define PRECISION_d #define NB_X 64 #define NB_Y 4 #define bank_shift 33 #define quarter_NB_X 16 #define half_NB_X 32 /***************************************************************************//** Upper case, compute block multiply, work = A*x, for any size n: [ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ] The order is different from the lower case, because the upper case processes a block row from the diagonal to the right, whereas the lower case processes a block row from the diagonal to the left. Uses a 64x4 thread block. For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed). For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles. In both cases, each thread multiplies 4 elements. For rows past the bottom of the matrix, the A pointer is adjusted to be the last valid row of A, which multiple threads will read. Extra rows are ignored when saving results to work. Columns past the right edge are explicitly ignored when loading. x values past the bottom are set to zero, thus, extra columns are zeroed when multiplying. *******************************************************************************/ __global__ void dsymv_kernel_U( int n, double const * __restrict__ A, int lda, double const * __restrict__ x, int incx, double * __restrict__ work) { #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200) // treats sA as 16x64 block #define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ] // treats sA as 32x32 block #define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)]) // 64x4 thread block const int tx = threadIdx.x; const int ty = threadIdx.y; const int blk = blockIdx.x; const int blk_ind = NB_X * blk; const int td = NB_X * ty + tx; // 32x8 thread block const int tx2 = td % half_NB_X; const int ty2 = td / half_NB_X; // If this blk has fewer than NB_X rows, partial is the number of valid rows, // so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid. // Else, partial == 0. int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); double psum, psum_t; double total = MAGMA_D_ZERO; // sA is used as a 32x32 block, sA32(i,j), // and as a 16x64 block, sA16(i,j), in different parts of the code. // sA must be at least half_NB_X*bank_shift = 32x33 = 1056; // quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056 __shared__ double sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? 
*/ __shared__ double sx_blk[NB_X]; // for x[ blk ] __shared__ double sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag double rA[4]; double psums_t[4]; // -------------------- // load 64x1 block x(blk_ind + 0:63) into sx_blk x += (blk_ind + tx)*incx; // x is x(blk_ind + tx) if ( ty == 0 ) { if ( partial == 0 || tx < partial ) { sx_blk[tx] = x[0]; } else { sx_blk[tx] = MAGMA_D_ZERO; } } // -------------------- // move to block row work += blk*lda; // work is work(0, blk) A += blk_ind; // A is A(blk_ind, 0) A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2) // move to 32x32 diag block A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2) // load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_D_ZERO; } } if ( tx2 >= partial ) { A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2) } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle, // as four 32x8 sections in parallel: // columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31 #pragma unroll for (int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x // each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3) psum = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 32x32 diag block, then repeat steps from first diag block A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2) // load 32x32 diag block A[block + 0:31, block + 0:31] into sA if ( partial ) { if ( tx2 + half_NB_X >= partial ) { A = A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_D_ZERO; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle #pragma unroll for (int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x psum = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + 
sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to off-diag 32x32 block A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2) // load 32x32 block of A into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - (tx2) + (partial - 1); } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_D_ZERO; } } if ( tx2 >= partial ) { A = A + (tx2) - (partial - 1); } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // multiply 32x32 block (below diag) psum = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += MAGMA_D_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2]; } //__syncthreads(); // no sync needed here // multiply transposed 32x32 block (above diag) psum_t = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial sums for non-transposed 32x32 block sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // store partial sums for transposed 32x32 block sA32(ty2, tx2) = psum_t; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 64x64 block right of diag in block row, and // switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 ) A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X ) A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty) // Unlike lower case, don't adjust A here for partial # of rows. // Since block is right of diagonal, it must have all NB rows, // but can have < NB columns, dealt with when loading below. x -= blk_ind*incx; // x is x(tx) // 16x16 thread block const int tx4 = td % quarter_NB_X; const int ty4 = td / quarter_NB_X; // cycle over blocks jj right of diagonal, in block row blk for (int jj=blk+1; jj < gridDim.x; ++jj) { partial = (jj == gridDim.x - 1 ? 
(n % NB_X) : 0); // load 64x1 block x(jj_ind + 0:63) into sx_jj if ( ty == 0 ) { if ( partial == 0 || tx < partial ) { sx_jj[tx] = x[jj*NB_X*incx]; } else { sx_jj[tx] = MAGMA_D_ZERO; } } __syncthreads(); for (int k=0; k < 4; k++) { // load 64x16 block of A into rA, 4 elements per thread, // as four 64x4 sections in parallel: // columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15 if ( partial ) { #pragma unroll for (int j=0; j < 4; j++) { if ( 4*ty + j + k*quarter_NB_X < partial ) { rA[j] = A[j*lda]; } else { rA[j] = MAGMA_D_ZERO; } } } else { #pragma unroll for (int j=0; j < 4; j++) { rA[j] = A[j*lda]; } } // 1) multiply 64x16 block A_{blk,jj} * x_jj // each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k) // 2) multiply 16x64 block A_{blk,jj} * x_blk, // storing each product Aji*xi to sA(j,i) #pragma unroll for (int j=0; j < 4; j++) { total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj sA16(ty*4 + j, tx) = MAGMA_D_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk } __syncthreads(); // do partial row sums for transposed 16x64 result // use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty) // sum sixteen 16x4 sections in parallel: // columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63 psum_t = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum_t += sA16(tx4, ty4*4 + j); } __syncthreads(); // store partial row sums of transposed result, y_jj (locally) psums_t[k] = psum_t; // move right to next 64x16 block A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty) } // already at next 64x64 block // A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty) // store partial row sums of transposed result, y_jj #pragma unroll for (int k=0; k < 4; k++) { sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k]; } __syncthreads(); // sum up partial row sums of transposed result, y_jj, and store final total to workspace // thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16 if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) { int ty4_nb4 = ty4*quarter_NB_X; psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4) + sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4) + sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4) + sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4) + sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4) + sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4) + sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4) + sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4); work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk ) } __syncthreads(); } // store row sums sA16(ty, tx) = total; __syncthreads(); partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); // sum up final total, y_blk, for row tx if ( ty == 0 && (partial == 0 || tx < partial) ) { total = sA16(0, tx) + sA16(1, tx) + sA16(2, tx) + sA16(3, tx); work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk ) } #endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */ } // end dsymv_kernel_U /***************************************************************************//** Upper case, sum up final results Each block sums one block row; each thread sums one row. 
On input (for 3 blocks): [ (A11*x1 + A12*x2 + A13*x3) --- --- ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] On output: [ (A11*x1 + A12*x2 + A13*x3) ] y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y [ (A13^H*x1) + (A23^H*x2) + (A33*x3) ] *******************************************************************************/ __global__ void dsymv_kernel_U_sum( int n, double alpha, int lda, double beta, double * __restrict__ y, int incy, double const * __restrict__ work ) { int tx = threadIdx.x; int blk = blockIdx.x; int blk_ind = blk * NB_X; int ind = blk_ind + tx; // Don't write outside [0, ..., n) if ( ind < n ) { work += ind; double Ax = MAGMA_D_ZERO; for (int j = 0; j <= blk; ++j) { Ax += work[0]; work += lda; } y[ind * incy] = beta*y[ind * incy] + alpha*Ax; } }
cd4e8c80b3fcd79ba72a8d54c5d89c5c20d084f8.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 dsymv_upper.cu is nearly identical to dsymv_upper.cu, just change names and drop MAGMA_D_CONJ. dsymv_kernel_U (upper) in dsymv_upper.cu is very similar to dsymv_kernel_L (lower) in dsymv.cu; diff the two files to compare. @generated from magmablas/zhemv_upper.cu, normal z -> d, Sun Nov 20 20:20:28 2016 @author Mark Gates */ #include "magma_internal.h" #include "commonblas_d.h" #define PRECISION_d #define NB_X 64 #define NB_Y 4 #define bank_shift 33 #define quarter_NB_X 16 #define half_NB_X 32 /***************************************************************************//** Upper case, compute block multiply, work = A*x, for any size n: [ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ] The order is different from the lower case, because the upper case processes a block row from the diagonal to the right, whereas the lower case processes a block row from the diagonal to the left. Uses a 64x4 thread block. For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed). For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles. In both cases, each thread multiplies 4 elements. For rows past the bottom of the matrix, the A pointer is adjusted to be the last valid row of A, which multiple threads will read. Extra rows are ignored when saving results to work. Columns past the right edge are explicitly ignored when loading. x values past the bottom are set to zero, thus, extra columns are zeroed when multiplying. *******************************************************************************/ __global__ void dsymv_kernel_U( int n, double const * __restrict__ A, int lda, double const * __restrict__ x, int incx, double * __restrict__ work) { #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200) // treats sA as 16x64 block #define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ] // treats sA as 32x32 block #define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)]) // 64x4 thread block const int tx = threadIdx.x; const int ty = threadIdx.y; const int blk = blockIdx.x; const int blk_ind = NB_X * blk; const int td = NB_X * ty + tx; // 32x8 thread block const int tx2 = td % half_NB_X; const int ty2 = td / half_NB_X; // If this blk has fewer than NB_X rows, partial is the number of valid rows, // so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid. // Else, partial == 0. int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); double psum, psum_t; double total = MAGMA_D_ZERO; // sA is used as a 32x32 block, sA32(i,j), // and as a 16x64 block, sA16(i,j), in different parts of the code. // sA must be at least half_NB_X*bank_shift = 32x33 = 1056; // quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056 __shared__ double sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? 
*/ __shared__ double sx_blk[NB_X]; // for x[ blk ] __shared__ double sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag double rA[4]; double psums_t[4]; // -------------------- // load 64x1 block x(blk_ind + 0:63) into sx_blk x += (blk_ind + tx)*incx; // x is x(blk_ind + tx) if ( ty == 0 ) { if ( partial == 0 || tx < partial ) { sx_blk[tx] = x[0]; } else { sx_blk[tx] = MAGMA_D_ZERO; } } // -------------------- // move to block row work += blk*lda; // work is work(0, blk) A += blk_ind; // A is A(blk_ind, 0) A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2) // move to 32x32 diag block A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2) // load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_D_ZERO; } } if ( tx2 >= partial ) { A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2) } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle, // as four 32x8 sections in parallel: // columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31 #pragma unroll for (int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x // each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3) psum = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 32x32 diag block, then repeat steps from first diag block A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2) // load 32x32 diag block A[block + 0:31, block + 0:31] into sA if ( partial ) { if ( tx2 + half_NB_X >= partial ) { A = A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_D_ZERO; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle #pragma unroll for (int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x psum = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + 
sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to off-diag 32x32 block A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2) // load 32x32 block of A into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - (tx2) + (partial - 1); } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_D_ZERO; } } if ( tx2 >= partial ) { A = A + (tx2) - (partial - 1); } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // multiply 32x32 block (below diag) psum = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += MAGMA_D_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2]; } //__syncthreads(); // no sync needed here // multiply transposed 32x32 block (above diag) psum_t = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial sums for non-transposed 32x32 block sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // store partial sums for transposed 32x32 block sA32(ty2, tx2) = psum_t; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 64x64 block right of diag in block row, and // switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 ) A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X ) A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty) // Unlike lower case, don't adjust A here for partial # of rows. // Since block is right of diagonal, it must have all NB rows, // but can have < NB columns, dealt with when loading below. x -= blk_ind*incx; // x is x(tx) // 16x16 thread block const int tx4 = td % quarter_NB_X; const int ty4 = td / quarter_NB_X; // cycle over blocks jj right of diagonal, in block row blk for (int jj=blk+1; jj < gridDim.x; ++jj) { partial = (jj == gridDim.x - 1 ? 
(n % NB_X) : 0); // load 64x1 block x(jj_ind + 0:63) into sx_jj if ( ty == 0 ) { if ( partial == 0 || tx < partial ) { sx_jj[tx] = x[jj*NB_X*incx]; } else { sx_jj[tx] = MAGMA_D_ZERO; } } __syncthreads(); for (int k=0; k < 4; k++) { // load 64x16 block of A into rA, 4 elements per thread, // as four 64x4 sections in parallel: // columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15 if ( partial ) { #pragma unroll for (int j=0; j < 4; j++) { if ( 4*ty + j + k*quarter_NB_X < partial ) { rA[j] = A[j*lda]; } else { rA[j] = MAGMA_D_ZERO; } } } else { #pragma unroll for (int j=0; j < 4; j++) { rA[j] = A[j*lda]; } } // 1) multiply 64x16 block A_{blk,jj} * x_jj // each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k) // 2) multiply 16x64 block A_{blk,jj} * x_blk, // storing each product Aji*xi to sA(j,i) #pragma unroll for (int j=0; j < 4; j++) { total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj sA16(ty*4 + j, tx) = MAGMA_D_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk } __syncthreads(); // do partial row sums for transposed 16x64 result // use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty) // sum sixteen 16x4 sections in parallel: // columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63 psum_t = MAGMA_D_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum_t += sA16(tx4, ty4*4 + j); } __syncthreads(); // store partial row sums of transposed result, y_jj (locally) psums_t[k] = psum_t; // move right to next 64x16 block A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty) } // already at next 64x64 block // A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty) // store partial row sums of transposed result, y_jj #pragma unroll for (int k=0; k < 4; k++) { sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k]; } __syncthreads(); // sum up partial row sums of transposed result, y_jj, and store final total to workspace // thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16 if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) { int ty4_nb4 = ty4*quarter_NB_X; psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4) + sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4) + sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4) + sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4) + sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4) + sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4) + sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4) + sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4); work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk ) } __syncthreads(); } // store row sums sA16(ty, tx) = total; __syncthreads(); partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); // sum up final total, y_blk, for row tx if ( ty == 0 && (partial == 0 || tx < partial) ) { total = sA16(0, tx) + sA16(1, tx) + sA16(2, tx) + sA16(3, tx); work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk ) } #endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */ } // end dsymv_kernel_U /***************************************************************************//** Upper case, sum up final results Each block sums one block row; each thread sums one row. 
On input (for 3 blocks): [ (A11*x1 + A12*x2 + A13*x3) --- --- ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] On output: [ (A11*x1 + A12*x2 + A13*x3) ] y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y [ (A13^H*x1) + (A23^H*x2) + (A33*x3) ] *******************************************************************************/ __global__ void dsymv_kernel_U_sum( int n, double alpha, int lda, double beta, double * __restrict__ y, int incy, double const * __restrict__ work ) { int tx = threadIdx.x; int blk = blockIdx.x; int blk_ind = blk * NB_X; int ind = blk_ind + tx; // Don't write outside [0, ..., n) if ( ind < n ) { work += ind; double Ax = MAGMA_D_ZERO; for (int j = 0; j <= blk; ++j) { Ax += work[0]; work += lda; } y[ind * incy] = beta*y[ind * incy] + alpha*Ax; } }
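Editor's sketch (assumption, not MAGMA's wrapper code): how the two kernels above combine for y = alpha*A*x + beta*y with A stored in the upper triangle. The grid has one block per NB_X rows of A; block row blk writes its partial products into column blk of the workspace, so dwork needs at least ldda*blocks doubles. dA, ldda, dx, incx, dy, incy, alpha, beta, n and dwork are assumed device pointers/scalars set up by the caller.
int blocks = (n + NB_X - 1) / NB_X;
dim3 grid( blocks, 1 );
dim3 threads( NB_X, NB_Y );       // 64 x 4, as documented in dsymv_kernel_U
dim3 threads_sum( NB_X, 1 );      // one thread per row of the block row
dsymv_kernel_U    <<< grid, threads     >>>( n, dA, ldda, dx, incx, dwork );
dsymv_kernel_U_sum<<< grid, threads_sum >>>( n, alpha, ldda, beta, dy, incy, dwork );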
60c1093a6ea36373603634640e59b99442b5ab8d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // https://web.stanford.edu/class/cs277/resources/papers/Moller1997b.pdf // ideas for further work - precompute all robot mesh norms #include "MeshParser.h" #include <chrono> __device__ float dot(float3 a, float3 b){ return ( a.x * b.x +a.y * b.y +a.z * b.z); } __device__ float3 vecMinus(float3 a, float3 b){ return {a.x - b.x, a.y - b.y, a.z - b.z}; } __device__ float3 cross(float3 a, float3 b){ return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x}; } // returns the t1 value as seen in equation (4) in the cited paper __device__ float getParam(float p0, float p1, float d0, float d1){ return (p0 + (p1 - p0) * (d0) / (d0 - d1)); } __global__ void detect_collision( Triangle *obstacles, size_t obs_size, Triangle *robot, size_t rob_size, bool *collisions){ int idx = threadIdx.x + blockIdx.x * blockDim.x; // edge case where thread doesn't matter if (idx >= obs_size){ return; } Triangle obs = obstacles[idx]; // calculate normal for our obstacle triangle //////////////////////////////////////////////////////////////////////// float3 obs_vec1 = vecMinus(obs.B, obs.A); float3 obs_vec2 = vecMinus(obs.C, obs.A); float3 obs_norm = cross(obs_vec1, obs_vec2); // scalar that satisfies obs_norm * X + obs_d = 0 float obs_d = -1 * dot(obs_norm, obs.A); //test for intersection against all robot triangles //////////////////////////////////////////////////////////////////////// Triangle rob; bool has_collision = false; for (int i = 0; i < rob_size; i++){ rob = robot[i]; float3 obs_planar_distances; // note: x, y, and z represent which the distances // from the triangle to the obstacle plane, not coordinates obs_planar_distances.x = dot(obs_norm, rob.A) + obs_d; obs_planar_distances.y = dot(obs_norm, rob.B) + obs_d; obs_planar_distances.z = dot(obs_norm, rob.C) + obs_d; // coplanar case //TODO add rosetta code to my citations if (abs(obs_planar_distances.x + obs_planar_distances.y + obs_planar_distances.z) < 0.0001f) { //TODO, also refactor code so this can appear later //TODO - project vertices onto a flat plane bool (*chkEdge)(TriPoint &, TriPoint &, TriPoint &, double) = NULL; has_collision = true; //For edge E of trangle 1, for(int i=0; i<3; i++) { int j=(i+1)%3; //Check all points of trangle 2 lay on the external side of the edge E. If //they do, the triangles do not collide. if (chkEdge(t1[i], t1[j], t2[0], eps) && chkEdge(t1[i], t1[j], t2[1], eps) && chkEdge(t1[i], t1[j], t2[2], eps)){ has_collision = false; break; } } if (!has_collision) //For edge E of trangle 2, for(int i=0; i<3; i++) { int j=(i+1)%3; //Check all points of trangle 1 lay on the external side of the edge E. If //they do, the triangles do not collide. 
if (chkEdge(t2[i], t2[j], t1[0], eps) && chkEdge(t2[i], t2[j], t1[1], eps) && chkEdge(t2[i], t2[j], t1[2], eps)){ has_collision = false; break; } } if(has_collision){ break; } else { continue; } } // may want to change 0 to some small threshhold above 0 to allow for coplanar case if ((obs_planar_distances.x > 0 && obs_planar_distances.y > 0 && obs_planar_distances.z > 0) || (obs_planar_distances.x < 0 && obs_planar_distances.y < 0 && obs_planar_distances.z < 0)){ continue; } /////////////////////////////////////////////////////////////////////////////////// // calculate the projection of the obstacle triangle against the robot triangle now float3 rob_vec1 = vecMinus(obs.B, obs.A); float3 rob_vec2 = vecMinus(obs.C, obs.A); float3 rob_norm = cross(obs_vec1, obs_vec2); // scalar that satisfies obs_norm * X + obs_d = 0 float rob_d = -1 * dot(obs_norm, obs.A); float3 rob_planar_distances; rob_planar_distances.x = dot(rob_norm, obs.A) + rob_d; rob_planar_distances.y = dot(rob_norm, obs.B) + rob_d; rob_planar_distances.z = dot(rob_norm, obs.C) + rob_d; if ((rob_planar_distances.x > 0 && rob_planar_distances.y > 0 && rob_planar_distances.z > 0) || (rob_planar_distances.x < 0 && rob_planar_distances.y < 0 && rob_planar_distances.z < 0)){ continue; } /////////////////////////////////////////////////////////////////////////////////// float3 direction = cross(rob_norm, obs_norm); // get points of obs intersecting line and corresponding planar distance float obs_intersect1, obs_intersect2; float obs_distance1, obs_distance2; if (rob_planar_distances.x > 0){ if (rob_planar_distances.y > 0){ obs_intersect1 = dot(direction, obs.A); obs_intersect2 = dot(direction, obs.B); obs_distance1 = rob_planar_distances.x; obs_distance2 = rob_planar_distances.y; } else { obs_intersect1 = dot(direction, obs.A); obs_intersect2 = dot(direction, obs.C); obs_distance1 = rob_planar_distances.x; obs_distance2 = rob_planar_distances.z; } } else { obs_intersect1 = dot(direction, obs.B); obs_intersect2 = dot(direction, obs.C); obs_distance1 = rob_planar_distances.y; obs_distance2 = rob_planar_distances.z; } // get points of rob intersecting line float rob_intersect1, rob_intersect2; float rob_distance1, rob_distance2; if (obs_planar_distances.x > 0){ if (obs_planar_distances.y > 0){ rob_intersect1 = dot(direction, rob.A); rob_intersect2 = dot(direction, rob.B); rob_distance1 = obs_planar_distances.x; rob_distance2 = obs_planar_distances.y; } else { rob_intersect1 = dot(direction, rob.A); rob_intersect2 = dot(direction, rob.C); rob_distance1 = obs_planar_distances.x; rob_distance2 = obs_planar_distances.z; } } else { rob_intersect1 = dot(direction, rob.B); rob_intersect2 = dot(direction, rob.C); rob_distance1 = obs_planar_distances.y; rob_distance2 = obs_planar_distances.z; } // should probably refactor these above if statements so that this is a part of it // get parameters such that intersection = obs_paramx * D float obs_param1 = getParam( obs_intersect1, obs_intersect2, obs_distance1, obs_distance2); float obs_param2 = getParam( obs_intersect2, obs_intersect1, obs_distance2, obs_distance1); float rob_param1 = getParam( rob_intersect1, rob_intersect2, rob_distance1, rob_distance2); float rob_param2 = getParam( rob_intersect2, rob_intersect1, rob_distance2, rob_distance1); // swap so that 1 is smaller if (obs_param1 > obs_param2) { float tmp = obs_param2; obs_param2 = obs_param1; obs_param1 = tmp; } if (rob_param1 > rob_param2) { float tmp = rob_param2; rob_param2 = rob_param1; rob_param1 = tmp; } if ( (obs_param2 < 
rob_param1) || obs_param1 > rob_param1) { continue; // no collision } else { has_collision = true; break; } } collisions[idx] = has_collision; } float dotc(float3 a, float3 b){ return ( (a.x * b.x) +(a.y * b.y) +(a.z * b.z)); } float3 vecMinusc(float3 a, float3 b){ return {a.x - b.x, a.y - b.y, a.z - b.z}; } float3 crossc(float3 a, float3 b){ return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x}; } // returns the t1 value as seen in equation (4) in the cited paper float getParamc(float p0, float p1, float d0, float d1){ return (p0 + (p1 - p0) * (d0) / (d0 - d1)); } void detectCollisionCPU(std::vector<Triangle> &robot, std::vector<Triangle> &obstacles, bool *collisions){ for (int idx = 0; idx < obstacles.size(); idx++){ Triangle obs = obstacles[idx]; // calculate normal for our obstacle triangle //////////////////////////////////////////////////////////////////////// float3 obs_vec1 = vecMinusc(obs.B, obs.A); float3 obs_vec2 = vecMinusc(obs.C, obs.A); float3 obs_norm = crossc(obs_vec1, obs_vec2); // scalar that satisfies obs_norm * X + obs_d = 0 float obs_d = -1 * dotc(obs_norm, obs.A); //test for intersection against all robot triangles //////////////////////////////////////////////////////////////////////// Triangle rob; bool has_collision = false; for (int i = 0; i < robot.size(); i++){ rob = robot[i]; float3 obs_planar_distances; // note: x, y, and z represent which the distances // from the triangle to the obstacle plane, not coordinates obs_planar_distances.x = dotc(obs_norm, rob.A) + obs_d; obs_planar_distances.y = dotc(obs_norm, rob.B) + obs_d; obs_planar_distances.z = dotc(obs_norm, rob.C) + obs_d; // coplanar case if (abs(obs_planar_distances.x + obs_planar_distances.y + obs_planar_distances.z) < 0.0001f) { //TODO, also refactor code so this can appear later } // may want to change 0 to some small threshhold above 0 to allow for coplanar case if ((obs_planar_distances.x > 0 && obs_planar_distances.y > 0 && obs_planar_distances.z > 0) || (obs_planar_distances.x < 0 && obs_planar_distances.y < 0 && obs_planar_distances.z < 0)){ continue; } /////////////////////////////////////////////////////////////////////////////////// // calculate the projection of the obstacle triangle against the robot triangle now float3 rob_vec1 = vecMinusc(obs.B, obs.A); float3 rob_vec2 = vecMinusc(obs.C, obs.A); float3 rob_norm = crossc(obs_vec1, obs_vec2); // scalar that satisfies obs_norm * X + obs_d = 0 float rob_d = -1 * dotc(obs_norm, obs.A); float3 rob_planar_distances; rob_planar_distances.x = dotc(rob_norm, obs.A) + rob_d; rob_planar_distances.y = dotc(rob_norm, obs.B) + rob_d; rob_planar_distances.z = dotc(rob_norm, obs.C) + rob_d; if ((rob_planar_distances.x > 0 && rob_planar_distances.y > 0 && rob_planar_distances.z > 0) || (rob_planar_distances.x < 0 && rob_planar_distances.y < 0 && rob_planar_distances.z < 0)){ continue; } /////////////////////////////////////////////////////////////////////////////////// float3 direction = crossc(rob_norm, obs_norm); // get points of obs intersecting line and corresponding planar distance float obs_intersect1, obs_intersect2; float obs_distance1, obs_distance2; if (rob_planar_distances.x > 0){ if (rob_planar_distances.y > 0){ obs_intersect1 = dotc(direction, obs.A); obs_intersect2 = dotc(direction, obs.B); obs_distance1 = rob_planar_distances.x; obs_distance2 = rob_planar_distances.y; } else { obs_intersect1 = dotc(direction, obs.A); obs_intersect2 = dotc(direction, obs.C); obs_distance1 = rob_planar_distances.x; obs_distance2 = 
rob_planar_distances.z; } } else { obs_intersect1 = dotc(direction, obs.B); obs_intersect2 = dotc(direction, obs.C); obs_distance1 = rob_planar_distances.y; obs_distance2 = rob_planar_distances.z; } // get points of rob intersecting line float rob_intersect1, rob_intersect2; float rob_distance1, rob_distance2; if (obs_planar_distances.x > 0){ if (obs_planar_distances.y > 0){ rob_intersect1 = dotc(direction, rob.A); rob_intersect2 = dotc(direction, rob.B); rob_distance1 = obs_planar_distances.x; rob_distance2 = obs_planar_distances.y; } else { rob_intersect1 = dotc(direction, rob.A); rob_intersect2 = dotc(direction, rob.C); rob_distance1 = obs_planar_distances.x; rob_distance2 = obs_planar_distances.z; } } else { rob_intersect1 = dotc(direction, rob.B); rob_intersect2 = dotc(direction, rob.C); rob_distance1 = obs_planar_distances.y; rob_distance2 = obs_planar_distances.z; } // should probably refactor these above if statements so that this is a part of it // get parameters such that intersection = obs_paramx * D float obs_param1 = getParamc( obs_intersect1, obs_intersect2, obs_distance1, obs_distance2); float obs_param2 = getParamc( obs_intersect2, obs_intersect1, obs_distance2, obs_distance1); float rob_param1 = getParamc( rob_intersect1, rob_intersect2, rob_distance1, rob_distance2); float rob_param2 = getParamc( rob_intersect2, rob_intersect1, rob_distance2, rob_distance1); // swap so that 1 is smaller if (obs_param1 > obs_param2) { float tmp = obs_param2; obs_param2 = obs_param1; obs_param1 = tmp; } if (rob_param1 > rob_param2) { float tmp = rob_param2; rob_param2 = rob_param1; rob_param1 = tmp; } if ( (obs_param2 < rob_param1) || obs_param1 > rob_param1) { continue; // no collision } else { has_collision = true; break; } } collisions[idx] = has_collision; } } int main(){ // load meshes // since we only want binary information about the presence of a collision // we can pass all of our meshes together as one concatenated array of triangles. // future work can involve calculating indices into this array of triangles by // also copying over an array of offsets into the device, allowing separation of individual // meshes. // note, this does not detect whether the robot is entirely inside the mesh // this problem can be ignored by choosing a step size smaller than the minimum radius // of the robot when implementing this as a part of RRT, since then we guarantee that at // least one point on the path will be in strictly intersecting an obstacle if there is // any collision at all. 
hipDeviceSynchronize();
  for (int i = 10; i < 10000001 ; i *= 10){
    std::cout << "\nTest with " << i << " obstacle triangles" <<std::endl;
    std::vector <Triangle> obstacles;
    std::vector <Triangle> robot;
    bool *collisions_GPU;
    bool *collisions_CPU;

    // bool ok = loadTrianglesEncapsulated("meshes/cube.obj", obstacles);
    bool ok = loadTrianglesEncapsulated("meshes/cube.obj", robot);
    addRandomTriangles(obstacles, i, 100, 100, 100, .01 );
    if (!ok){
      std::cout << "Error, mesh could not be read" <<std::endl;
      exit(-1);
    }
    collisions_GPU = new bool[obstacles.size()];
    collisions_CPU = new bool[obstacles.size()];

    // load meshes into GPU, timed
    Triangle *d_obstacles, *d_robot;
    bool *d_collisions;
    auto start = std::chrono::high_resolution_clock::now();
    hipMalloc((void **) &d_obstacles, sizeof(Triangle) * obstacles.size());
    hipMalloc((void **) &d_collisions, sizeof(bool) * obstacles.size());
    hipMalloc((void **) &d_robot, sizeof(Triangle) * robot.size());
    hipMemcpy(d_obstacles, &obstacles[0], obstacles.size() * sizeof(Triangle), hipMemcpyHostToDevice);
    hipMemcpy(d_robot, &robot[0], robot.size() * sizeof(Triangle), hipMemcpyHostToDevice);
    hipDeviceSynchronize();
    auto end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> elapsed = end - start;
    std::cout << "GPU Memory writing took " << elapsed.count() << " seconds " << std::endl;

    // execute kernel, timed
    int numBlocks = obstacles.size() / 256 + 1;
    int num_obs_triangles = obstacles.size();
    int num_rob_triangles = robot.size();
    start = std::chrono::high_resolution_clock::now();
    hipLaunchKernelGGL(( detect_collision), dim3(numBlocks), dim3(256), 0, 0, d_obstacles, num_obs_triangles, d_robot, num_rob_triangles, d_collisions);
    hipDeviceSynchronize();
    end = std::chrono::high_resolution_clock::now();
    elapsed = end - start;
    std::cout << "GPU Execution took " << elapsed.count() << " seconds " << std::endl;

    // load result, timed
    start = std::chrono::high_resolution_clock::now();
    hipMemcpy(collisions_GPU, d_collisions, obstacles.size() * sizeof(bool), hipMemcpyDeviceToHost);
    hipFree(d_obstacles);
    hipFree(d_robot);
    hipFree(d_collisions);
    hipDeviceSynchronize();
    end = std::chrono::high_resolution_clock::now();
    elapsed = end - start;
    std::cout << "GPU Memory reading took " << elapsed.count() << " seconds " << std::endl;

    //CPU benchmarking
    start = std::chrono::high_resolution_clock::now();
    detectCollisionCPU(robot, obstacles, collisions_CPU);
    end = std::chrono::high_resolution_clock::now();
    elapsed = end - start;
    std::cout << "CPU execution took " << elapsed.count() << " seconds " << std::endl;

    int mismatches = 0;
    int GPU_falses = 0;
    int CPU_falses = 0;
    for (int j = 0; j < obstacles.size(); j++){
      if (collisions_CPU[j]){ CPU_falses++; }
      if (collisions_GPU[j]){ GPU_falses++; }
      if (collisions_CPU[j] != collisions_GPU[j]){ mismatches++; }
    }
    std::cout << "Mismatches: " << mismatches << std::endl;
    std::cout << "GPU falses: " << GPU_falses << std::endl;
    std::cout << "CPU falses: " << CPU_falses << std::endl;

    // release host result buffers (allocated with new[]) only after the comparison loop
    delete[] collisions_GPU;
    delete[] collisions_CPU;
  }
}
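// The final step of the Moller-style test above reduces to a 1-D interval overlap check on the line
// where the two triangle planes intersect: the triangles can only collide if the parameter intervals
// [obs_param1, obs_param2] and [rob_param1, rob_param2] overlap. A minimal sketch of that check as a
// standalone helper; the helper name is illustrative and not part of the file above, and it assumes
// both intervals are already sorted (lo <= hi), as the swap code above guarantees.
__host__ __device__ inline bool intervalsOverlap(float a_lo, float a_hi, float b_lo, float b_hi)
{
    // Disjoint iff one interval ends before the other begins; otherwise they share at least a point.
    return !(a_hi < b_lo || b_hi < a_lo);
}
// Example usage under those assumptions: intervalsOverlap(obs_param1, obs_param2, rob_param1, rob_param2)
// would report whether the two triangles overlap on the intersection line.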
60c1093a6ea36373603634640e59b99442b5ab8d.cu
// https://web.stanford.edu/class/cs277/resources/papers/Moller1997b.pdf // ideas for further work - precompute all robot mesh norms #include "MeshParser.h" #include <chrono> __device__ float dot(float3 a, float3 b){ return ( a.x * b.x +a.y * b.y +a.z * b.z); } __device__ float3 vecMinus(float3 a, float3 b){ return {a.x - b.x, a.y - b.y, a.z - b.z}; } __device__ float3 cross(float3 a, float3 b){ return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x}; } // returns the t1 value as seen in equation (4) in the cited paper __device__ float getParam(float p0, float p1, float d0, float d1){ return (p0 + (p1 - p0) * (d0) / (d0 - d1)); } __global__ void detect_collision( Triangle *obstacles, size_t obs_size, Triangle *robot, size_t rob_size, bool *collisions){ int idx = threadIdx.x + blockIdx.x * blockDim.x; // edge case where thread doesn't matter if (idx >= obs_size){ return; } Triangle obs = obstacles[idx]; // calculate normal for our obstacle triangle //////////////////////////////////////////////////////////////////////// float3 obs_vec1 = vecMinus(obs.B, obs.A); float3 obs_vec2 = vecMinus(obs.C, obs.A); float3 obs_norm = cross(obs_vec1, obs_vec2); // scalar that satisfies obs_norm * X + obs_d = 0 float obs_d = -1 * dot(obs_norm, obs.A); //test for intersection against all robot triangles //////////////////////////////////////////////////////////////////////// Triangle rob; bool has_collision = false; for (int i = 0; i < rob_size; i++){ rob = robot[i]; float3 obs_planar_distances; // note: x, y, and z represent which the distances // from the triangle to the obstacle plane, not coordinates obs_planar_distances.x = dot(obs_norm, rob.A) + obs_d; obs_planar_distances.y = dot(obs_norm, rob.B) + obs_d; obs_planar_distances.z = dot(obs_norm, rob.C) + obs_d; // coplanar case //TODO add rosetta code to my citations if (abs(obs_planar_distances.x + obs_planar_distances.y + obs_planar_distances.z) < 0.0001f) { //TODO, also refactor code so this can appear later //TODO - project vertices onto a flat plane bool (*chkEdge)(TriPoint &, TriPoint &, TriPoint &, double) = NULL; has_collision = true; //For edge E of trangle 1, for(int i=0; i<3; i++) { int j=(i+1)%3; //Check all points of trangle 2 lay on the external side of the edge E. If //they do, the triangles do not collide. if (chkEdge(t1[i], t1[j], t2[0], eps) && chkEdge(t1[i], t1[j], t2[1], eps) && chkEdge(t1[i], t1[j], t2[2], eps)){ has_collision = false; break; } } if (!has_collision) //For edge E of trangle 2, for(int i=0; i<3; i++) { int j=(i+1)%3; //Check all points of trangle 1 lay on the external side of the edge E. If //they do, the triangles do not collide. 
if (chkEdge(t2[i], t2[j], t1[0], eps) && chkEdge(t2[i], t2[j], t1[1], eps) && chkEdge(t2[i], t2[j], t1[2], eps)){ has_collision = false; break; } } if(has_collision){ break; } else { continue; } } // may want to change 0 to some small threshhold above 0 to allow for coplanar case if ((obs_planar_distances.x > 0 && obs_planar_distances.y > 0 && obs_planar_distances.z > 0) || (obs_planar_distances.x < 0 && obs_planar_distances.y < 0 && obs_planar_distances.z < 0)){ continue; } /////////////////////////////////////////////////////////////////////////////////// // calculate the projection of the obstacle triangle against the robot triangle now float3 rob_vec1 = vecMinus(obs.B, obs.A); float3 rob_vec2 = vecMinus(obs.C, obs.A); float3 rob_norm = cross(obs_vec1, obs_vec2); // scalar that satisfies obs_norm * X + obs_d = 0 float rob_d = -1 * dot(obs_norm, obs.A); float3 rob_planar_distances; rob_planar_distances.x = dot(rob_norm, obs.A) + rob_d; rob_planar_distances.y = dot(rob_norm, obs.B) + rob_d; rob_planar_distances.z = dot(rob_norm, obs.C) + rob_d; if ((rob_planar_distances.x > 0 && rob_planar_distances.y > 0 && rob_planar_distances.z > 0) || (rob_planar_distances.x < 0 && rob_planar_distances.y < 0 && rob_planar_distances.z < 0)){ continue; } /////////////////////////////////////////////////////////////////////////////////// float3 direction = cross(rob_norm, obs_norm); // get points of obs intersecting line and corresponding planar distance float obs_intersect1, obs_intersect2; float obs_distance1, obs_distance2; if (rob_planar_distances.x > 0){ if (rob_planar_distances.y > 0){ obs_intersect1 = dot(direction, obs.A); obs_intersect2 = dot(direction, obs.B); obs_distance1 = rob_planar_distances.x; obs_distance2 = rob_planar_distances.y; } else { obs_intersect1 = dot(direction, obs.A); obs_intersect2 = dot(direction, obs.C); obs_distance1 = rob_planar_distances.x; obs_distance2 = rob_planar_distances.z; } } else { obs_intersect1 = dot(direction, obs.B); obs_intersect2 = dot(direction, obs.C); obs_distance1 = rob_planar_distances.y; obs_distance2 = rob_planar_distances.z; } // get points of rob intersecting line float rob_intersect1, rob_intersect2; float rob_distance1, rob_distance2; if (obs_planar_distances.x > 0){ if (obs_planar_distances.y > 0){ rob_intersect1 = dot(direction, rob.A); rob_intersect2 = dot(direction, rob.B); rob_distance1 = obs_planar_distances.x; rob_distance2 = obs_planar_distances.y; } else { rob_intersect1 = dot(direction, rob.A); rob_intersect2 = dot(direction, rob.C); rob_distance1 = obs_planar_distances.x; rob_distance2 = obs_planar_distances.z; } } else { rob_intersect1 = dot(direction, rob.B); rob_intersect2 = dot(direction, rob.C); rob_distance1 = obs_planar_distances.y; rob_distance2 = obs_planar_distances.z; } // should probably refactor these above if statements so that this is a part of it // get parameters such that intersection = obs_paramx * D float obs_param1 = getParam( obs_intersect1, obs_intersect2, obs_distance1, obs_distance2); float obs_param2 = getParam( obs_intersect2, obs_intersect1, obs_distance2, obs_distance1); float rob_param1 = getParam( rob_intersect1, rob_intersect2, rob_distance1, rob_distance2); float rob_param2 = getParam( rob_intersect2, rob_intersect1, rob_distance2, rob_distance1); // swap so that 1 is smaller if (obs_param1 > obs_param2) { float tmp = obs_param2; obs_param2 = obs_param1; obs_param1 = tmp; } if (rob_param1 > rob_param2) { float tmp = rob_param2; rob_param2 = rob_param1; rob_param1 = tmp; } if ( (obs_param2 < 
rob_param1) || obs_param1 > rob_param1) { continue; // no collision } else { has_collision = true; break; } } collisions[idx] = has_collision; } float dotc(float3 a, float3 b){ return ( (a.x * b.x) +(a.y * b.y) +(a.z * b.z)); } float3 vecMinusc(float3 a, float3 b){ return {a.x - b.x, a.y - b.y, a.z - b.z}; } float3 crossc(float3 a, float3 b){ return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x}; } // returns the t1 value as seen in equation (4) in the cited paper float getParamc(float p0, float p1, float d0, float d1){ return (p0 + (p1 - p0) * (d0) / (d0 - d1)); } void detectCollisionCPU(std::vector<Triangle> &robot, std::vector<Triangle> &obstacles, bool *collisions){ for (int idx = 0; idx < obstacles.size(); idx++){ Triangle obs = obstacles[idx]; // calculate normal for our obstacle triangle //////////////////////////////////////////////////////////////////////// float3 obs_vec1 = vecMinusc(obs.B, obs.A); float3 obs_vec2 = vecMinusc(obs.C, obs.A); float3 obs_norm = crossc(obs_vec1, obs_vec2); // scalar that satisfies obs_norm * X + obs_d = 0 float obs_d = -1 * dotc(obs_norm, obs.A); //test for intersection against all robot triangles //////////////////////////////////////////////////////////////////////// Triangle rob; bool has_collision = false; for (int i = 0; i < robot.size(); i++){ rob = robot[i]; float3 obs_planar_distances; // note: x, y, and z represent which the distances // from the triangle to the obstacle plane, not coordinates obs_planar_distances.x = dotc(obs_norm, rob.A) + obs_d; obs_planar_distances.y = dotc(obs_norm, rob.B) + obs_d; obs_planar_distances.z = dotc(obs_norm, rob.C) + obs_d; // coplanar case if (abs(obs_planar_distances.x + obs_planar_distances.y + obs_planar_distances.z) < 0.0001f) { //TODO, also refactor code so this can appear later } // may want to change 0 to some small threshhold above 0 to allow for coplanar case if ((obs_planar_distances.x > 0 && obs_planar_distances.y > 0 && obs_planar_distances.z > 0) || (obs_planar_distances.x < 0 && obs_planar_distances.y < 0 && obs_planar_distances.z < 0)){ continue; } /////////////////////////////////////////////////////////////////////////////////// // calculate the projection of the obstacle triangle against the robot triangle now float3 rob_vec1 = vecMinusc(obs.B, obs.A); float3 rob_vec2 = vecMinusc(obs.C, obs.A); float3 rob_norm = crossc(obs_vec1, obs_vec2); // scalar that satisfies obs_norm * X + obs_d = 0 float rob_d = -1 * dotc(obs_norm, obs.A); float3 rob_planar_distances; rob_planar_distances.x = dotc(rob_norm, obs.A) + rob_d; rob_planar_distances.y = dotc(rob_norm, obs.B) + rob_d; rob_planar_distances.z = dotc(rob_norm, obs.C) + rob_d; if ((rob_planar_distances.x > 0 && rob_planar_distances.y > 0 && rob_planar_distances.z > 0) || (rob_planar_distances.x < 0 && rob_planar_distances.y < 0 && rob_planar_distances.z < 0)){ continue; } /////////////////////////////////////////////////////////////////////////////////// float3 direction = crossc(rob_norm, obs_norm); // get points of obs intersecting line and corresponding planar distance float obs_intersect1, obs_intersect2; float obs_distance1, obs_distance2; if (rob_planar_distances.x > 0){ if (rob_planar_distances.y > 0){ obs_intersect1 = dotc(direction, obs.A); obs_intersect2 = dotc(direction, obs.B); obs_distance1 = rob_planar_distances.x; obs_distance2 = rob_planar_distances.y; } else { obs_intersect1 = dotc(direction, obs.A); obs_intersect2 = dotc(direction, obs.C); obs_distance1 = rob_planar_distances.x; obs_distance2 = 
rob_planar_distances.z; } } else { obs_intersect1 = dotc(direction, obs.B); obs_intersect2 = dotc(direction, obs.C); obs_distance1 = rob_planar_distances.y; obs_distance2 = rob_planar_distances.z; } // get points of rob intersecting line float rob_intersect1, rob_intersect2; float rob_distance1, rob_distance2; if (obs_planar_distances.x > 0){ if (obs_planar_distances.y > 0){ rob_intersect1 = dotc(direction, rob.A); rob_intersect2 = dotc(direction, rob.B); rob_distance1 = obs_planar_distances.x; rob_distance2 = obs_planar_distances.y; } else { rob_intersect1 = dotc(direction, rob.A); rob_intersect2 = dotc(direction, rob.C); rob_distance1 = obs_planar_distances.x; rob_distance2 = obs_planar_distances.z; } } else { rob_intersect1 = dotc(direction, rob.B); rob_intersect2 = dotc(direction, rob.C); rob_distance1 = obs_planar_distances.y; rob_distance2 = obs_planar_distances.z; } // should probably refactor these above if statements so that this is a part of it // get parameters such that intersection = obs_paramx * D float obs_param1 = getParamc( obs_intersect1, obs_intersect2, obs_distance1, obs_distance2); float obs_param2 = getParamc( obs_intersect2, obs_intersect1, obs_distance2, obs_distance1); float rob_param1 = getParamc( rob_intersect1, rob_intersect2, rob_distance1, rob_distance2); float rob_param2 = getParamc( rob_intersect2, rob_intersect1, rob_distance2, rob_distance1); // swap so that 1 is smaller if (obs_param1 > obs_param2) { float tmp = obs_param2; obs_param2 = obs_param1; obs_param1 = tmp; } if (rob_param1 > rob_param2) { float tmp = rob_param2; rob_param2 = rob_param1; rob_param1 = tmp; } if ( (obs_param2 < rob_param1) || obs_param1 > rob_param1) { continue; // no collision } else { has_collision = true; break; } } collisions[idx] = has_collision; } } int main(){ // load meshes // since we only want binary information about the presence of a collision // we can pass all of our meshes together as one concatenated array of triangles. // future work can involve calculating indices into this array of triangles by // also copying over an array of offsets into the device, allowing separation of individual // meshes. // note, this does not detect whether the robot is entirely inside the mesh // this problem can be ignored by choosing a step size smaller than the minimum radius // of the robot when implementing this as a part of RRT, since then we guarantee that at // least one point on the path will be in strictly intersecting an obstacle if there is // any collision at all. 
cudaDeviceSynchronize();
  for (int i = 10; i < 10000001 ; i *= 10){
    std::cout << "\nTest with " << i << " obstacle triangles" <<std::endl;
    std::vector <Triangle> obstacles;
    std::vector <Triangle> robot;
    bool *collisions_GPU;
    bool *collisions_CPU;

    // bool ok = loadTrianglesEncapsulated("meshes/cube.obj", obstacles);
    bool ok = loadTrianglesEncapsulated("meshes/cube.obj", robot);
    addRandomTriangles(obstacles, i, 100, 100, 100, .01 );
    if (!ok){
      std::cout << "Error, mesh could not be read" <<std::endl;
      exit(-1);
    }
    collisions_GPU = new bool[obstacles.size()];
    collisions_CPU = new bool[obstacles.size()];

    // load meshes into GPU, timed
    Triangle *d_obstacles, *d_robot;
    bool *d_collisions;
    auto start = std::chrono::high_resolution_clock::now();
    cudaMalloc((void **) &d_obstacles, sizeof(Triangle) * obstacles.size());
    cudaMalloc((void **) &d_collisions, sizeof(bool) * obstacles.size());
    cudaMalloc((void **) &d_robot, sizeof(Triangle) * robot.size());
    cudaMemcpy(d_obstacles, &obstacles[0], obstacles.size() * sizeof(Triangle), cudaMemcpyHostToDevice);
    cudaMemcpy(d_robot, &robot[0], robot.size() * sizeof(Triangle), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    auto end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> elapsed = end - start;
    std::cout << "GPU Memory writing took " << elapsed.count() << " seconds " << std::endl;

    // execute kernel, timed
    int numBlocks = obstacles.size() / 256 + 1;
    int num_obs_triangles = obstacles.size();
    int num_rob_triangles = robot.size();
    start = std::chrono::high_resolution_clock::now();
    detect_collision<<< numBlocks, 256>>>(d_obstacles, num_obs_triangles, d_robot, num_rob_triangles, d_collisions);
    cudaDeviceSynchronize();
    end = std::chrono::high_resolution_clock::now();
    elapsed = end - start;
    std::cout << "GPU Execution took " << elapsed.count() << " seconds " << std::endl;

    // load result, timed
    start = std::chrono::high_resolution_clock::now();
    cudaMemcpy(collisions_GPU, d_collisions, obstacles.size() * sizeof(bool), cudaMemcpyDeviceToHost);
    cudaFree(d_obstacles);
    cudaFree(d_robot);
    cudaFree(d_collisions);
    cudaDeviceSynchronize();
    end = std::chrono::high_resolution_clock::now();
    elapsed = end - start;
    std::cout << "GPU Memory reading took " << elapsed.count() << " seconds " << std::endl;

    //CPU benchmarking
    start = std::chrono::high_resolution_clock::now();
    detectCollisionCPU(robot, obstacles, collisions_CPU);
    end = std::chrono::high_resolution_clock::now();
    elapsed = end - start;
    std::cout << "CPU execution took " << elapsed.count() << " seconds " << std::endl;

    int mismatches = 0;
    int GPU_falses = 0;
    int CPU_falses = 0;
    for (int j = 0; j < obstacles.size(); j++){
      if (collisions_CPU[j]){ CPU_falses++; }
      if (collisions_GPU[j]){ GPU_falses++; }
      if (collisions_CPU[j] != collisions_GPU[j]){ mismatches++; }
    }
    std::cout << "Mismatches: " << mismatches << std::endl;
    std::cout << "GPU falses: " << GPU_falses << std::endl;
    std::cout << "CPU falses: " << CPU_falses << std::endl;

    // release host result buffers (allocated with new[]) only after the comparison loop
    delete[] collisions_GPU;
    delete[] collisions_CPU;
  }
}
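// The coplanar branch in detect_collision above leaves chkEdge as a NULL function pointer (marked
// TODO in the original). One possible shape for such an edge-side predicate is sketched here purely
// for illustration: it uses a plain float2 because the TriPoint layout from MeshParser.h is not
// visible in this file, so both the point type and the sign convention are assumptions.
__host__ __device__ inline bool pointOutsideEdgeSketch(float2 a, float2 b, float2 p, double eps)
{
    // 2-D cross product of (b - a) and (p - a): positive when p lies to the left of the directed
    // edge a->b, negative when it lies to the right (the "external" side for counter-clockwise
    // triangles under the assumed winding).
    double side = (double)(b.x - a.x) * (double)(p.y - a.y)
                - (double)(b.y - a.y) * (double)(p.x - a.x);
    return side < eps;
}
// Under those assumptions, a coplanar pair would not collide if all three vertices of one triangle
// test as outside some edge of the other, which is how the branch above already uses chkEdge.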
90e77e1f3b220384001a5bc601aaa7b7a6888819.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <stdio.h> #include "star2d1r-512-16-256_kernel.hu" #define BENCH_DIM 2 #define BENCH_FPP 9 #define BENCH_RAD 1 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1; if (scop) { if (dimsize >= 3 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ hipError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != hipSuccess) { \ fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == hipSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(hipGetLastError()); \ } while(0) float *dev_A; cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float))); { cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice)); #ifdef STENCILBENCH hipDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 16; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 480; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { hipLaunchKernelGGL(( kernel0_16), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), 
dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 2) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 3) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = 
(__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 4) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 5) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && 
(__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 6) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 7) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / 
__side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 8) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 9) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 10) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const 
AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 11) { { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 12) { { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * 
__OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 13) { { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 14) { { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 
k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 15) { { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 
0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 2) { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 3) { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 4) { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 5) { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 6) { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE 
__OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 7) { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 8) { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 9) { const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 494; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_9), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 10) { const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * 
__OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_10), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 11) { const AN5D_TYPE __side0Len = 11; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 490; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_11), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 12) { const AN5D_TYPE __side0Len = 12; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_12), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 13) { const AN5D_TYPE __side0Len = 13; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 486; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_13), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 14) { const AN5D_TYPE __side0Len = 14; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 484; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * 
__halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_14), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 15) { const AN5D_TYPE __side0Len = 15; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 482; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_15), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH hipDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost)); } cudaCheckReturn(hipFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) A[(t+1)%2][i][j] = 0.1873f * A[t%2][i-1][j] + 0.1876f * A[t%2][i][j-1] + 0.2500f * A[t%2][i][j] + 0.1877f * A[t%2][i][j+1] + 0.1874f * A[t%2][i+1][j]; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
90e77e1f3b220384001a5bc601aaa7b7a6888819.cu
#include <assert.h> #include <stdio.h> #include "star2d1r-512-16-256_kernel.hu" #define BENCH_DIM 2 #define BENCH_FPP 9 #define BENCH_RAD 1 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1; if (scop) { if (dimsize >= 3 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ cudaError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != cudaSuccess) { \ fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == cudaSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(cudaGetLastError()); \ } while(0) float *dev_A; cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float))); { cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice)); #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 16; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 480; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { kernel0_16<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE 
__OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 2) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 3) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D 
ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 4) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 5) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 6) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 
506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 7) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 8) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len 
* __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 9) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 10) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 502; const 
AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 11) { { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 12) { { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * 
__halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 13) { { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 14) { { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 15) { { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 256; const 
AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 2) { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 3) { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * 
__halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 4) { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 5) { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 6) { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 7) { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, 
timestep, c0); } else if (__c0Len % __side0LenMax == 8) { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 9) { const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 494; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_9<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 10) { const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_10<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 11) { const AN5D_TYPE __side0Len = 11; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 490; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_11<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 12) { const AN5D_TYPE __side0Len = 12; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const 
AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_12<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 13) { const AN5D_TYPE __side0Len = 13; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 486; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_13<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 14) { const AN5D_TYPE __side0Len = 14; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 484; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_14<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 15) { const AN5D_TYPE __side0Len = 15; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 482; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_15<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost)); } cudaCheckReturn(cudaFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) A[(t+1)%2][i][j] = 0.1873f * A[t%2][i-1][j] + 0.1876f * A[t%2][i][j-1] + 0.2500f * A[t%2][i][j] + 0.1877f * A[t%2][i][j+1] + 0.1874f * A[t%2][i+1][j]; } return (((end_time != 0.0) ? 
end_time : sb_time()) - start_time); }
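For context, a minimal driver for this translation unit might look like the sketch below; it is an assumption, not part of the benchmark harness. SB_TYPE is taken to be float (the device transfers above use sizeof(float)), the halo radius mirrors the BENCH_RAD of 1 defined at the top of the file, and the scop flag simply chooses between the tiled CUDA path and the OpenMP reference loop.

#include <cstdio>
#include <cstdlib>

// Forward declaration matching the definition above, assuming SB_TYPE == float.
double kernel_stencil(float *A1, int compsize, int timestep, bool scop);

int main()
{
    const int compsize = 512, timestep = 16, rad = 1;     // rad mirrors BENCH_RAD
    const int dimsize = compsize + 2 * rad;
    // Double-buffered (2 x dimsize x dimsize) array; the halo ring keeps its initial value.
    float *A = (float *)calloc((size_t)2 * dimsize * dimsize, sizeof(float));
    double gpu_seconds = kernel_stencil(A, compsize, timestep, true);   // tiled CUDA path
    double cpu_seconds = kernel_stencil(A, compsize, timestep, false);  // OpenMP reference path
    printf("gpu: %.3f s, cpu: %.3f s\n", gpu_seconds, cpu_seconds);
    free(A);
    return 0;
}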
00172895edced511de4b3d25ba303bc22e16aa34.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip_runtime.h"

// statistical kernel
__global__ void prepare(hipLaunchParm lp,
                        long d_Ne,
                        fp *d_I,      // input image (DEVICE GLOBAL MEMORY)
                        fp *d_sums,   // output: image values, feeds the sum reduction (DEVICE GLOBAL MEMORY)
                        fp *d_sums2){ // output: squared image values, feeds the sum-of-squares reduction (DEVICE GLOBAL MEMORY)

    // indexes
    int bx = hipBlockIdx_x;             // get current horizontal block index
    int tx = hipThreadIdx_x;            // get current horizontal thread index
    int ei = (bx*NUMBER_THREADS)+tx;    // unique thread id, more threads than actual elements !!!

    // copy each image value and its square into the reduction buffers
    if(ei<d_Ne){                        // do only for the number of elements, omit extra threads
        d_sums[ei] = d_I[ei];
        d_sums2[ei] = d_I[ei]*d_I[ei];
    }
}
00172895edced511de4b3d25ba303bc22e16aa34.cu
#include "hip_runtime.h" // statistical kernel __global__ void prepare(hipLaunchParm lp, long d_Ne, fp *d_I, // pointer to output image (DEVICE GLOBAL MEMORY) fp *d_sums, // pointer to input image (DEVICE GLOBAL MEMORY) fp *d_sums2){ // indexes int bx = hipBlockIdx_x; // get current horizontal block index int tx = hipThreadIdx_x; // get current horizontal thread index int ei = (bx*NUMBER_THREADS)+tx; // unique thread id, more threads than actual elements !!! // copy input to output & log uncompress if(ei<d_Ne){ // do only for the number of elements, omit extra threads d_sums[ei] = d_I[ei]; d_sums2[ei] = d_I[ei]*d_I[ei]; } }
aae10e804b6309ef01a6506c261f17c0da8be383.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../ops.cuh"

template <typename T>
__global__ void scatter(const int num_nodes, const int feat_dim,
                        const T *features, const int *pointers,
                        const int *indices, T *__restrict__ next_layer) {
  int node_id = blockDim.y * blockIdx.x + threadIdx.y;
  int feat_id = threadIdx.x + blockDim.x * blockIdx.y;
  if (node_id >= num_nodes || feat_id >= feat_dim)
    return;
  T local = features[node_id * feat_dim + feat_id];
  int target;
  int start = pointers[node_id];
  int end = pointers[node_id + 1];
  for (int i = start; i < end; i++) {
    target = indices[i] * feat_dim + feat_id;
    next_layer[target] = local;
  }
}

template __global__ void scatter<float>(const int num_nodes, const int feat_dim,
                                        const float *features, const int *pointers,
                                        const int *indices,
                                        float *__restrict__ next_layer);
template __global__ void scatter<int64_t>(const int num_nodes, const int feat_dim,
                                          const int64_t *features, const int *pointers,
                                          const int *indices,
                                          int64_t *__restrict__ next_layer);
aae10e804b6309ef01a6506c261f17c0da8be383.cu
#include "../ops.cuh" template <typename T> __global__ void scatter(const int num_nodes, const int feat_dim, const T *features, const int *pointers, const int *indices, T *__restrict__ next_layer) { int node_id = blockDim.y * blockIdx.x + threadIdx.y; int feat_id = threadIdx.x + blockDim.x * blockIdx.y; if (node_id >= num_nodes || feat_id >= feat_dim) return; T local = features[node_id * feat_dim + feat_id]; int target; int start = pointers[node_id]; int end = pointers[node_id + 1]; for (int i = start; i < end; i++) { target = indices[i] * feat_dim + feat_id; next_layer[target] = local; } } template __global__ void scatter<float>(const int num_nodes, const int feat_dim, const float *features, const int *pointers, const int *indices, float *__restrict__ next_layer); template __global__ void scatter<int64_t>(const int num_nodes, const int feat_dim, const int64_t *features, const int *pointers, const int *indices, int64_t *__restrict__ next_layer);
216919e7acfc9fb4a77e8b1f99617ca23cf7ec81.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/zbajac_csr_overlap.cu, normal z -> d, Mon Jun 25 18:24:27 2018 */ #include "magmasparse_internal.h" #define PRECISION_d #define BLOCKSIZE 256 __global__ void magma_dk_testLocking(unsigned int* locks, int n) { int id = threadIdx.x % n; bool leaveLoop = false; while (!leaveLoop) { if (atomicExch(&(locks[id]), 1u) == 0u) { //critical section leaveLoop = true; atomicExch(&(locks[id]),0u); } } } /* __global__ void magma_dbajac_csr_o_ls_kernel(int localiters, int n, int matrices, int overlap, magma_d_matrix *D, magma_d_matrix *R, const double * __restrict__ b, double * x ) { // int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; // int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; __shared__ double local_x[ BLOCKSIZE ]; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; //valR = R[ (1+blockIdx.x-1)%matrices ].dval; //colR = R[ (1+blockIdx.x-1)%matrices ].dcol; //rowR = R[ (1+blockIdx.x-1)%matrices ].drow; //valD = D[ (1+blockIdx.x-1)%matrices ].dval; //colD = D[ (1+blockIdx.x-1)%matrices ].dcol; //rowD = D[ (1+blockIdx.x-1)%matrices ].drow; if (blockIdx.x%2 == 1) { valR = R[0].dval; valD = D[0].dval; colR = R[0].dcol; rowR = R[0].drow; colD = D[0].dcol; rowD = D[0].drow; } else { valR = R[1].dval; valD = D[1].dval; colR = R[1].dcol; rowR = R[1].drow; colD = D[1].dcol; rowD = D[1].drow; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; printf("bdx:%d idx:%d start:%d end:%d\n", blockIdx.x, threadIdx.x, start, end); #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif #pragma unroll for (i = start; i < end; i++) v += valR[i] * x[ colR[i] ]; start = rowD[index]; end = rowD[index+1]; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations local_x[threadIdx.x] = x[index]; //+ ( v - tmp); // / (valD[start]); __syncthreads(); #pragma unroll for (j = 0; j < localiters-1; j++) { tmp = zero; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if (threadIdx.x > overlap) { // RAS x[index] = local_x[threadIdx.x]; } } } */ __global__ void magma_dbajac_csr_o_ls_kernel1(int localiters, int n, int matrices, int overlap, double * valD, magma_index_t * rowD, magma_index_t * colD, double * valR, magma_index_t * rowR, magma_index_t * colR, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; //bool leaveLoop = false; if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] 
]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel2(int localiters, int n, int matrices, int overlap, double * valD0, magma_index_t * rowD0, magma_index_t * colD0, double * valR0, magma_index_t * rowR0, magma_index_t * colR0, double * valD1, magma_index_t * rowD1, magma_index_t * colD1, double * valR1, magma_index_t * rowR1, magma_index_t * colR1, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; //bool leaveLoop = false; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if (blockIdx.x%matrices == 0) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if (blockIdx.x%matrices == 1) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel4(int localiters, int n, int matrices, int overlap, double * valD0, magma_index_t * rowD0, magma_index_t * colD0, double * valR0, magma_index_t * rowR0, magma_index_t * colR0, double * valD1, magma_index_t * rowD1, magma_index_t * colD1, double * valR1, magma_index_t * rowR1, magma_index_t * colR1, double * valD2, magma_index_t * rowD2, magma_index_t * colD2, double * valR2, magma_index_t * rowR2, magma_index_t * colR2, double * valD3, magma_index_t * rowD3, magma_index_t * colD3, double * valR3, magma_index_t * rowR3, magma_index_t * colR3, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; //bool leaveLoop = false; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==1 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==2 ) { valR = valR1; valD = valD1; colR = colR1; 
rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==3 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel8(int localiters, int n, int matrices, int overlap, double * valD0, magma_index_t * rowD0, magma_index_t * colD0, double * valR0, magma_index_t * rowR0, magma_index_t * colR0, double * valD1, magma_index_t * rowD1, magma_index_t * colD1, double * valR1, magma_index_t * rowR1, magma_index_t * colR1, double * valD2, magma_index_t * rowD2, magma_index_t * colD2, double * valR2, magma_index_t * rowR2, magma_index_t * colR2, double * valD3, magma_index_t * rowD3, magma_index_t * colD3, double * valR3, magma_index_t * rowR3, magma_index_t * colR3, double * valD4, magma_index_t * rowD4, magma_index_t * colD4, double * valR4, magma_index_t * rowR4, magma_index_t * colR4, double * valD5, magma_index_t * rowD5, magma_index_t * colD5, double * valR5, magma_index_t * rowR5, magma_index_t * colR5, double * valD6, magma_index_t * rowD6, magma_index_t * colD6, double * valR6, magma_index_t * rowR6, magma_index_t * colR6, double * valD7, magma_index_t * rowD7, magma_index_t * colD7, double * valR7, magma_index_t * rowR7, magma_index_t * colR7, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if( blockIdx.x%matrices==0 ){ valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }else if ( blockIdx.x%matrices==1 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }else if ( blockIdx.x%matrices==2 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }else if ( blockIdx.x%matrices==3 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; }else if ( blockIdx.x%matrices==4 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==5 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==6 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==7 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && 
(defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel16(int localiters, int n, int matrices, int overlap, double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , double *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , double *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , double *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10, double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11, double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12, double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13, double *valD14, magma_index_t *rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, magma_index_t *colR14, double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==1 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==2 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = 
colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==3 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==4 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==5 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==6 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==7 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==8 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==9 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==10 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==11 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==12 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==13 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==14 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==15 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel32(int localiters, int n, int matrices, int overlap, double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , double *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 
, double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , double *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , double *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10, double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11, double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12, double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13, double *valD14, magma_index_t *rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, magma_index_t *colR14, double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15, double *valD16, magma_index_t *rowD16, magma_index_t *colD16, double *valR16, magma_index_t *rowR16, magma_index_t *colR16, double *valD17, magma_index_t *rowD17, magma_index_t *colD17, double *valR17, magma_index_t *rowR17, magma_index_t *colR17, double *valD18, magma_index_t *rowD18, magma_index_t *colD18, double *valR18, magma_index_t *rowR18, magma_index_t *colR18, double *valD19, magma_index_t *rowD19, magma_index_t *colD19, double *valR19, magma_index_t *rowR19, magma_index_t *colR19, double *valD20, magma_index_t *rowD20, magma_index_t *colD20, double *valR20, magma_index_t *rowR20, magma_index_t *colR20, double *valD21, magma_index_t *rowD21, magma_index_t *colD21, double *valR21, magma_index_t *rowR21, magma_index_t *colR21, double *valD22, magma_index_t *rowD22, magma_index_t *colD22, double *valR22, magma_index_t *rowR22, magma_index_t *colR22, double *valD23, magma_index_t *rowD23, magma_index_t *colD23, double *valR23, magma_index_t *rowR23, magma_index_t *colR23, double *valD24, magma_index_t *rowD24, magma_index_t *colD24, double *valR24, magma_index_t *rowR24, magma_index_t *colR24, double *valD25, magma_index_t *rowD25, magma_index_t *colD25, double *valR25, magma_index_t *rowR25, magma_index_t *colR25, double *valD26, magma_index_t *rowD26, magma_index_t *colD26, double *valR26, magma_index_t *rowR26, magma_index_t *colR26, double *valD27, magma_index_t *rowD27, magma_index_t *colD27, double *valR27, magma_index_t *rowR27, magma_index_t *colR27, double *valD28, magma_index_t *rowD28, magma_index_t *colD28, double *valR28, magma_index_t *rowR28, magma_index_t *colR28, double *valD29, magma_index_t *rowD29, magma_index_t *colD29, double *valR29, magma_index_t *rowR29, magma_index_t *colR29, double *valD30, magma_index_t *rowD30, magma_index_t *colD30, double *valR30, magma_index_t *rowR30, magma_index_t *colR30, double *valD31, magma_index_t *rowD31, magma_index_t *colD31, double *valR31, magma_index_t *rowR31, magma_index_t *colR31, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==1 ) { valR = valR30; valD = 
valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==2 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==3 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==4 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==5 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==6 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==7 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==8 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==9 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==10 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==11 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==12 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==13 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==14 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==15 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==16 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==17 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==18 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==19 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==20 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==21 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==22 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==23 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==24 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==25 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==26 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==27 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==28 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( 
blockIdx.x%matrices==29 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==30 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==31 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel64(int localiters, int n, int matrices, int overlap, double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , double *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , double *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , double *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10, double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11, double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12, double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13, double *valD14, magma_index_t *rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, magma_index_t *colR14, double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15, double *valD16, magma_index_t *rowD16, magma_index_t *colD16, double *valR16, magma_index_t *rowR16, magma_index_t *colR16, double *valD17, magma_index_t *rowD17, magma_index_t *colD17, double *valR17, magma_index_t *rowR17, 
magma_index_t *colR17, double *valD18, magma_index_t *rowD18, magma_index_t *colD18, double *valR18, magma_index_t *rowR18, magma_index_t *colR18, double *valD19, magma_index_t *rowD19, magma_index_t *colD19, double *valR19, magma_index_t *rowR19, magma_index_t *colR19, double *valD20, magma_index_t *rowD20, magma_index_t *colD20, double *valR20, magma_index_t *rowR20, magma_index_t *colR20, double *valD21, magma_index_t *rowD21, magma_index_t *colD21, double *valR21, magma_index_t *rowR21, magma_index_t *colR21, double *valD22, magma_index_t *rowD22, magma_index_t *colD22, double *valR22, magma_index_t *rowR22, magma_index_t *colR22, double *valD23, magma_index_t *rowD23, magma_index_t *colD23, double *valR23, magma_index_t *rowR23, magma_index_t *colR23, double *valD24, magma_index_t *rowD24, magma_index_t *colD24, double *valR24, magma_index_t *rowR24, magma_index_t *colR24, double *valD25, magma_index_t *rowD25, magma_index_t *colD25, double *valR25, magma_index_t *rowR25, magma_index_t *colR25, double *valD26, magma_index_t *rowD26, magma_index_t *colD26, double *valR26, magma_index_t *rowR26, magma_index_t *colR26, double *valD27, magma_index_t *rowD27, magma_index_t *colD27, double *valR27, magma_index_t *rowR27, magma_index_t *colR27, double *valD28, magma_index_t *rowD28, magma_index_t *colD28, double *valR28, magma_index_t *rowR28, magma_index_t *colR28, double *valD29, magma_index_t *rowD29, magma_index_t *colD29, double *valR29, magma_index_t *rowR29, magma_index_t *colR29, double *valD30, magma_index_t *rowD30, magma_index_t *colD30, double *valR30, magma_index_t *rowR30, magma_index_t *colR30, double *valD31, magma_index_t *rowD31, magma_index_t *colD31, double *valR31, magma_index_t *rowR31, magma_index_t *colR31, double *valD32, magma_index_t *rowD32, magma_index_t *colD32, double *valR32, magma_index_t *rowR32, magma_index_t *colR32, double *valD33, magma_index_t *rowD33, magma_index_t *colD33, double *valR33, magma_index_t *rowR33, magma_index_t *colR33, double *valD34, magma_index_t *rowD34, magma_index_t *colD34, double *valR34, magma_index_t *rowR34, magma_index_t *colR34, double *valD35, magma_index_t *rowD35, magma_index_t *colD35, double *valR35, magma_index_t *rowR35, magma_index_t *colR35, double *valD36, magma_index_t *rowD36, magma_index_t *colD36, double *valR36, magma_index_t *rowR36, magma_index_t *colR36, double *valD37, magma_index_t *rowD37, magma_index_t *colD37, double *valR37, magma_index_t *rowR37, magma_index_t *colR37, double *valD38, magma_index_t *rowD38, magma_index_t *colD38, double *valR38, magma_index_t *rowR38, magma_index_t *colR38, double *valD39, magma_index_t *rowD39, magma_index_t *colD39, double *valR39, magma_index_t *rowR39, magma_index_t *colR39, double *valD40, magma_index_t *rowD40, magma_index_t *colD40, double *valR40, magma_index_t *rowR40, magma_index_t *colR40, double *valD41, magma_index_t *rowD41, magma_index_t *colD41, double *valR41, magma_index_t *rowR41, magma_index_t *colR41, double *valD42, magma_index_t *rowD42, magma_index_t *colD42, double *valR42, magma_index_t *rowR42, magma_index_t *colR42, double *valD43, magma_index_t *rowD43, magma_index_t *colD43, double *valR43, magma_index_t *rowR43, magma_index_t *colR43, double *valD44, magma_index_t *rowD44, magma_index_t *colD44, double *valR44, magma_index_t *rowR44, magma_index_t *colR44, double *valD45, magma_index_t *rowD45, magma_index_t *colD45, double *valR45, magma_index_t *rowR45, magma_index_t *colR45, double *valD46, magma_index_t *rowD46, magma_index_t 
*colD46, double *valR46, magma_index_t *rowR46, magma_index_t *colR46, double *valD47, magma_index_t *rowD47, magma_index_t *colD47, double *valR47, magma_index_t *rowR47, magma_index_t *colR47, double *valD48, magma_index_t *rowD48, magma_index_t *colD48, double *valR48, magma_index_t *rowR48, magma_index_t *colR48, double *valD49, magma_index_t *rowD49, magma_index_t *colD49, double *valR49, magma_index_t *rowR49, magma_index_t *colR49, double *valD50, magma_index_t *rowD50, magma_index_t *colD50, double *valR50, magma_index_t *rowR50, magma_index_t *colR50, double *valD51, magma_index_t *rowD51, magma_index_t *colD51, double *valR51, magma_index_t *rowR51, magma_index_t *colR51, double *valD52, magma_index_t *rowD52, magma_index_t *colD52, double *valR52, magma_index_t *rowR52, magma_index_t *colR52, double *valD53, magma_index_t *rowD53, magma_index_t *colD53, double *valR53, magma_index_t *rowR53, magma_index_t *colR53, double *valD54, magma_index_t *rowD54, magma_index_t *colD54, double *valR54, magma_index_t *rowR54, magma_index_t *colR54, double *valD55, magma_index_t *rowD55, magma_index_t *colD55, double *valR55, magma_index_t *rowR55, magma_index_t *colR55, double *valD56, magma_index_t *rowD56, magma_index_t *colD56, double *valR56, magma_index_t *rowR56, magma_index_t *colR56, double *valD57, magma_index_t *rowD57, magma_index_t *colD57, double *valR57, magma_index_t *rowR57, magma_index_t *colR57, double *valD58, magma_index_t *rowD58, magma_index_t *colD58, double *valR58, magma_index_t *rowR58, magma_index_t *colR58, double *valD59, magma_index_t *rowD59, magma_index_t *colD59, double *valR59, magma_index_t *rowR59, magma_index_t *colR59, double *valD60, magma_index_t *rowD60, magma_index_t *colD60, double *valR60, magma_index_t *rowR60, magma_index_t *colR60, double *valD61, magma_index_t *rowD61, magma_index_t *colD61, double *valR61, magma_index_t *rowR61, magma_index_t *colR61, double *valD62, magma_index_t *rowD62, magma_index_t *colD62, double *valR62, magma_index_t *rowR62, magma_index_t *colR62, double *valD63, magma_index_t *rowD63, magma_index_t *colD63, double *valR63, magma_index_t *rowR63, magma_index_t *colR63, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR63; valD = valD63; colR = colR63; rowR = rowR63; colD = colD63; rowD = rowD63; } else if ( blockIdx.x%matrices==1 ) { valR = valR62; valD = valD62; colR = colR62; rowR = rowR62; colD = colD62; rowD = rowD62; } else if ( blockIdx.x%matrices==2 ) { valR = valR61; valD = valD61; colR = colR61; rowR = rowR61; colD = colD61; rowD = rowD61; } else if ( blockIdx.x%matrices==3 ) { valR = valR60; valD = valD60; colR = colR60; rowR = rowR60; colD = colD60; rowD = rowD60; } else if ( blockIdx.x%matrices==4 ) { valR = valR59; valD = valD59; colR = colR59; rowR = rowR59; colD = colD59; rowD = rowD59; } else if ( blockIdx.x%matrices==5 ) { valR = valR58; valD = valD58; colR = colR58; rowR = rowR58; colD = colD58; rowD = rowD58; } else if ( blockIdx.x%matrices==6 ) { valR = valR57; valD = valD57; colR = colR57; rowR = rowR57; colD = colD57; rowD = rowD57; } else if ( blockIdx.x%matrices==7 ) { valR = valR56; valD = valD56; colR = colR56; rowR = rowR56; colD = colD56; rowD = rowD56; } else if ( 
blockIdx.x%matrices==8 ) { valR = valR55; valD = valD55; colR = colR55; rowR = rowR55; colD = colD55; rowD = rowD55; } else if ( blockIdx.x%matrices==9 ) { valR = valR54; valD = valD54; colR = colR54; rowR = rowR54; colD = colD54; rowD = rowD54; } else if ( blockIdx.x%matrices==10 ) { valR = valR53; valD = valD53; colR = colR53; rowR = rowR53; colD = colD53; rowD = rowD53; } else if ( blockIdx.x%matrices==11 ) { valR = valR52; valD = valD52; colR = colR52; rowR = rowR52; colD = colD52; rowD = rowD52; } else if ( blockIdx.x%matrices==12 ) { valR = valR51; valD = valD51; colR = colR51; rowR = rowR51; colD = colD51; rowD = rowD51; } else if ( blockIdx.x%matrices==13 ) { valR = valR50; valD = valD50; colR = colR50; rowR = rowR50; colD = colD50; rowD = rowD50; } else if ( blockIdx.x%matrices==14 ) { valR = valR49; valD = valD49; colR = colR49; rowR = rowR49; colD = colD49; rowD = rowD49; } else if ( blockIdx.x%matrices==15 ) { valR = valR48; valD = valD48; colR = colR48; rowR = rowR48; colD = colD48; rowD = rowD48; } else if ( blockIdx.x%matrices==16 ) { valR = valR47; valD = valD47; colR = colR47; rowR = rowR47; colD = colD47; rowD = rowD47; } else if ( blockIdx.x%matrices==17 ) { valR = valR46; valD = valD46; colR = colR46; rowR = rowR46; colD = colD46; rowD = rowD46; } else if ( blockIdx.x%matrices==18 ) { valR = valR45; valD = valD45; colR = colR45; rowR = rowR45; colD = colD45; rowD = rowD45; } else if ( blockIdx.x%matrices==19 ) { valR = valR44; valD = valD44; colR = colR44; rowR = rowR44; colD = colD44; rowD = rowD44; } else if ( blockIdx.x%matrices==20 ) { valR = valR43; valD = valD43; colR = colR43; rowR = rowR43; colD = colD43; rowD = rowD43; } else if ( blockIdx.x%matrices==21 ) { valR = valR42; valD = valD42; colR = colR42; rowR = rowR42; colD = colD42; rowD = rowD42; } else if ( blockIdx.x%matrices==22 ) { valR = valR41; valD = valD41; colR = colR41; rowR = rowR41; colD = colD41; rowD = rowD41; } else if ( blockIdx.x%matrices==23 ) { valR = valR40; valD = valD40; colR = colR40; rowR = rowR40; colD = colD40; rowD = rowD40; } else if ( blockIdx.x%matrices==24 ) { valR = valR39; valD = valD39; colR = colR39; rowR = rowR39; colD = colD39; rowD = rowD39; } else if ( blockIdx.x%matrices==25 ) { valR = valR38; valD = valD38; colR = colR38; rowR = rowR38; colD = colD38; rowD = rowD38; } else if ( blockIdx.x%matrices==26 ) { valR = valR37; valD = valD37; colR = colR37; rowR = rowR37; colD = colD37; rowD = rowD37; } else if ( blockIdx.x%matrices==27 ) { valR = valR36; valD = valD36; colR = colR36; rowR = rowR36; colD = colD36; rowD = rowD36; } else if ( blockIdx.x%matrices==28 ) { valR = valR35; valD = valD35; colR = colR35; rowR = rowR35; colD = colD35; rowD = rowD35; } else if ( blockIdx.x%matrices==29 ) { valR = valR34; valD = valD34; colR = colR34; rowR = rowR34; colD = colD34; rowD = rowD34; } else if ( blockIdx.x%matrices==30 ) { valR = valR33; valD = valD33; colR = colR33; rowR = rowR33; colD = colD33; rowD = rowD33; } else if ( blockIdx.x%matrices==31 ) { valR = valR32; valD = valD32; colR = colR32; rowR = rowR32; colD = colD32; rowD = rowD32; } else if ( blockIdx.x%matrices==32 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==33 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==34 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==35 ) { valR = valR28; 
valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==36 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==37 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==38 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==39 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==40 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==41 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==42 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==43 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==44 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==45 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==46 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==47 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==48 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==49 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==50 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==51 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==52 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==53 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==54 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==55 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==56 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==57 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==58 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==59 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==60 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==61 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==62 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( 
blockIdx.x%matrices==63 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration with directed restricted additive Schwarz overlap (top-down) performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. Arguments --------- @param[in] localiters magma_int_t number of local Jacobi-like updates @param[in] matrices magma_int_t number of sub-matrices @param[in] overlap magma_int_t size of the overlap @param[in] D magma_d_matrix* set of matrices with diagonal blocks @param[in] R magma_d_matrix* set of matrices with non-diagonal parts @param[in] b magma_d_matrix RHS @param[in] x magma_d_matrix* iterate/solution @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dbajac_csr_overlap( magma_int_t localiters, magma_int_t matrices, magma_int_t overlap, magma_d_matrix *D, magma_d_matrix *R, magma_d_matrix b, magma_d_matrix *x, magma_queue_t queue ) { int blocksize1 = BLOCKSIZE; int blocksize2 = 1; int size = D[0].num_rows; int min_nnz=100; for(int i=0; i<matrices; i++){ min_nnz = min(min_nnz, R[i].nnz); } if ( min_nnz > -1 ){ if ( matrices == 1 ){ int dimgrid1 = magma_ceildiv( size , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel1), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, b.dval, x->dval ); } else if (matrices == 2) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel2), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, b.dval, x->dval ); //magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 4){ int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); 
hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel4), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, b.dval, x->dval ); //magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 8) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel8), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, D[4].dval, D[4].drow, D[4].dcol, R[4].dval, R[4].drow, R[4].dcol, D[5].dval, D[5].drow, D[5].dcol, R[5].dval, R[5].drow, R[5].dcol, D[6].dval, D[6].drow, D[6].dcol, R[6].dval, R[6].drow, R[6].dcol, D[7].dval, D[7].drow, D[7].dcol, R[7].dval, R[7].drow, R[7].dcol, b.dval, x->dval ); //magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 16) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel16), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, b.dval, x->dval ); } else if (matrices == 32) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel32), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[ 
0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, b.dval, x->dval ); } else if (matrices == 64) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_dbajac_csr_o_ls_kernel64), dim3(grid), dim3(block), 0, queue->cuda_stream() , localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, 
R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, D[32].dval, D[32].drow, D[32].dcol, R[32].dval, R[32].drow, R[32].dcol, D[33].dval, D[33].drow, D[33].dcol, R[33].dval, R[33].drow, R[33].dcol, D[34].dval, D[34].drow, D[34].dcol, R[34].dval, R[34].drow, R[34].dcol, D[35].dval, D[35].drow, D[35].dcol, R[35].dval, R[35].drow, R[35].dcol, D[36].dval, D[36].drow, D[36].dcol, R[36].dval, R[36].drow, R[36].dcol, D[37].dval, D[37].drow, D[37].dcol, R[37].dval, R[37].drow, R[37].dcol, D[38].dval, D[38].drow, D[38].dcol, R[38].dval, R[38].drow, R[38].dcol, D[39].dval, D[39].drow, D[39].dcol, R[39].dval, R[39].drow, R[39].dcol, D[40].dval, D[40].drow, D[40].dcol, R[40].dval, R[40].drow, R[40].dcol, D[41].dval, D[41].drow, D[41].dcol, R[41].dval, R[41].drow, R[41].dcol, D[42].dval, D[42].drow, D[42].dcol, R[42].dval, R[42].drow, R[42].dcol, D[43].dval, D[43].drow, D[43].dcol, R[43].dval, R[43].drow, R[43].dcol, D[44].dval, D[44].drow, D[44].dcol, R[44].dval, R[44].drow, R[44].dcol, D[45].dval, D[45].drow, D[45].dcol, R[45].dval, R[45].drow, R[45].dcol, D[46].dval, D[46].drow, D[46].dcol, R[46].dval, R[46].drow, R[46].dcol, D[47].dval, D[47].drow, D[47].dcol, R[47].dval, R[47].drow, R[47].dcol, D[48].dval, D[48].drow, D[48].dcol, R[48].dval, R[48].drow, R[48].dcol, D[49].dval, D[49].drow, D[49].dcol, R[49].dval, R[49].drow, R[49].dcol, D[50].dval, D[50].drow, D[50].dcol, R[50].dval, R[50].drow, R[50].dcol, D[51].dval, D[51].drow, D[51].dcol, R[51].dval, R[51].drow, R[51].dcol, D[52].dval, D[52].drow, D[52].dcol, R[52].dval, R[52].drow, R[52].dcol, D[53].dval, D[53].drow, D[53].dcol, R[53].dval, R[53].drow, R[53].dcol, D[54].dval, D[54].drow, D[54].dcol, R[54].dval, R[54].drow, R[54].dcol, D[55].dval, D[55].drow, D[55].dcol, R[55].dval, R[55].drow, R[55].dcol, D[56].dval, D[56].drow, D[56].dcol, R[56].dval, R[56].drow, R[56].dcol, D[57].dval, D[57].drow, D[57].dcol, R[57].dval, R[57].drow, R[57].dcol, D[58].dval, D[58].drow, D[58].dcol, R[58].dval, R[58].drow, R[58].dcol, D[59].dval, D[59].drow, D[59].dcol, R[59].dval, R[59].drow, R[59].dcol, D[60].dval, D[60].drow, D[60].dcol, R[60].dval, R[60].drow, R[60].dcol, D[61].dval, 
D[61].drow, D[61].dcol, R[61].dval, R[61].drow, R[61].dcol, D[62].dval, D[62].drow, D[62].dcol, R[62].dval, R[62].drow, R[62].dcol, D[63].dval, D[63].drow, D[63].dcol, R[63].dval, R[63].drow, R[63].dcol, b.dval, x->dval ); //magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else { printf("error: invalid matrix count.\n"); } } else { printf("error: all elements in diagonal block.\n"); } return MAGMA_SUCCESS; }
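The doxygen block above describes the routine as a block-asynchronous Jacobi sweep with restricted additive Schwarz (RAS) overlap: each thread block reads blockDim.x rows but owns only the lower blockDim.x - overlap of them, and only threads with threadIdx.x >= overlap write their result back. The small host-side sketch below is not part of the MAGMA sources; n, overlap, SKETCH_BLOCKSIZE and the printout are illustrative assumptions. It merely reproduces the index arithmetic used by the general kernels (kernel4 and larger) and by the grid-size computation in magma_dbajac_csr_overlap, so the subdomain decomposition can be inspected on the CPU.

#include <stdio.h>

#define SKETCH_BLOCKSIZE 8   /* stands in for BLOCKSIZE (256) in the real kernels */

int main(void)
{
    int n = 20;        /* number of matrix rows -- illustrative assumption        */
    int overlap = 2;   /* rows shared with the previous subdomain                 */

    /* grid size as computed in magma_dbajac_csr_overlap:
       ceildiv( n * B/(B-overlap), B )                                            */
    int grid = (n * SKETCH_BLOCKSIZE / (SKETCH_BLOCKSIZE - overlap)
                + SKETCH_BLOCKSIZE - 1) / SKETCH_BLOCKSIZE;

    for (int b = 0; b < grid; ++b) {
        /* same as: index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x */
        int first = b * (SKETCH_BLOCKSIZE - overlap) - overlap;
        for (int t = 0; t < SKETCH_BLOCKSIZE; ++t) {
            int index = first + t;
            if (index < 0 || index >= n)
                continue;        /* the kernels guard with index>-1 && index<n    */
            printf("block %d thread %d -> row %2d %s\n", b, t, index,
                   t >= overlap ? "(owned, written back)"
                                : "(overlap, read only)");
        }
    }
    return 0;
}

Running this with the assumed values shows each row being written back by exactly one block while the first `overlap` threads of every block only read rows owned by the previous subdomain, which is the top-down RAS behaviour the kernels implement.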
216919e7acfc9fb4a77e8b1f99617ca23cf7ec81.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from sparse/blas/zbajac_csr_overlap.cu, normal z -> d, Mon Jun 25 18:24:27 2018 */ #include "magmasparse_internal.h" #define PRECISION_d #define BLOCKSIZE 256 __global__ void magma_dk_testLocking(unsigned int* locks, int n) { int id = threadIdx.x % n; bool leaveLoop = false; while (!leaveLoop) { if (atomicExch(&(locks[id]), 1u) == 0u) { //critical section leaveLoop = true; atomicExch(&(locks[id]),0u); } } } /* __global__ void magma_dbajac_csr_o_ls_kernel(int localiters, int n, int matrices, int overlap, magma_d_matrix *D, magma_d_matrix *R, const double * __restrict__ b, double * x ) { // int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; // int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; __shared__ double local_x[ BLOCKSIZE ]; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; //valR = R[ (1+blockIdx.x-1)%matrices ].dval; //colR = R[ (1+blockIdx.x-1)%matrices ].dcol; //rowR = R[ (1+blockIdx.x-1)%matrices ].drow; //valD = D[ (1+blockIdx.x-1)%matrices ].dval; //colD = D[ (1+blockIdx.x-1)%matrices ].dcol; //rowD = D[ (1+blockIdx.x-1)%matrices ].drow; if (blockIdx.x%2 == 1) { valR = R[0].dval; valD = D[0].dval; colR = R[0].dcol; rowR = R[0].drow; colD = D[0].dcol; rowD = D[0].drow; } else { valR = R[1].dval; valD = D[1].dval; colR = R[1].dcol; rowR = R[1].drow; colD = D[1].dcol; rowD = D[1].drow; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; printf("bdx:%d idx:%d start:%d end:%d\n", blockIdx.x, threadIdx.x, start, end); #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif #pragma unroll for (i = start; i < end; i++) v += valR[i] * x[ colR[i] ]; start = rowD[index]; end = rowD[index+1]; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations local_x[threadIdx.x] = x[index]; //+ ( v - tmp); // / (valD[start]); __syncthreads(); #pragma unroll for (j = 0; j < localiters-1; j++) { tmp = zero; #pragma unroll for (i = start; i < end; i++) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if (threadIdx.x > overlap) { // RAS x[index] = local_x[threadIdx.x]; } } } */ __global__ void magma_dbajac_csr_o_ls_kernel1(int localiters, int n, int matrices, int overlap, double * valD, magma_index_t * rowD, magma_index_t * colD, double * valR, magma_index_t * rowR, magma_index_t * colR, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*blockDim.x; int index = blockIdx.x*blockDim.x+threadIdx.x; int i, j, start, end; //bool leaveLoop = false; if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; 
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel2(int localiters, int n, int matrices, int overlap, double * valD0, magma_index_t * rowD0, magma_index_t * colD0, double * valR0, magma_index_t * rowR0, magma_index_t * colR0, double * valD1, magma_index_t * rowD1, magma_index_t * colD1, double * valR1, magma_index_t * rowR1, magma_index_t * colR1, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*blockDim.x/2-blockDim.x/2; int index = blockIdx.x*blockDim.x/2+threadIdx.x-blockDim.x/2; int i, j, start, end; //bool leaveLoop = false; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if (blockIdx.x%matrices == 0) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if (blockIdx.x%matrices == 1) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel4(int localiters, int n, int matrices, int overlap, double * valD0, magma_index_t * rowD0, magma_index_t * colD0, double * valR0, magma_index_t * rowR0, magma_index_t * colR0, double * valD1, magma_index_t * rowD1, magma_index_t * colD1, double * valR1, magma_index_t * rowR1, magma_index_t * colR1, double * valD2, magma_index_t * rowD2, magma_index_t * colD2, double * valR2, magma_index_t * rowR2, magma_index_t * colR2, double * valD3, magma_index_t * rowD3, magma_index_t * colD3, double * valR3, magma_index_t * rowR3, magma_index_t * colR3, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; //bool leaveLoop = false; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==1 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==2 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==3 ) { valR = 
valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel8(int localiters, int n, int matrices, int overlap, double * valD0, magma_index_t * rowD0, magma_index_t * colD0, double * valR0, magma_index_t * rowR0, magma_index_t * colR0, double * valD1, magma_index_t * rowD1, magma_index_t * colD1, double * valR1, magma_index_t * rowR1, magma_index_t * colR1, double * valD2, magma_index_t * rowD2, magma_index_t * colD2, double * valR2, magma_index_t * rowR2, magma_index_t * colR2, double * valD3, magma_index_t * rowD3, magma_index_t * colD3, double * valR3, magma_index_t * rowR3, magma_index_t * colR3, double * valD4, magma_index_t * rowD4, magma_index_t * colD4, double * valR4, magma_index_t * rowR4, magma_index_t * colR4, double * valD5, magma_index_t * rowD5, magma_index_t * colD5, double * valR5, magma_index_t * rowR5, magma_index_t * colR5, double * valD6, magma_index_t * rowD6, magma_index_t * colD6, double * valR6, magma_index_t * rowR6, magma_index_t * colR6, double * valD7, magma_index_t * rowD7, magma_index_t * colD7, double * valR7, magma_index_t * rowR7, magma_index_t * colR7, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if( blockIdx.x%matrices==0 ){ valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; }else if ( blockIdx.x%matrices==1 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; }else if ( blockIdx.x%matrices==2 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; }else if ( blockIdx.x%matrices==3 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; }else if ( blockIdx.x%matrices==4 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; }else if ( blockIdx.x%matrices==5 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; }else if ( blockIdx.x%matrices==6 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; }else if ( blockIdx.x%matrices==7 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( 
start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel16(int localiters, int n, int matrices, int overlap, double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , double *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , double *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , double *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10, double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11, double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12, double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13, double *valD14, magma_index_t *rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, magma_index_t *colR14, double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==1 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==2 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==3 ) { valR = valR12; valD = valD12; colR = colR12; 
rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==4 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==5 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==6 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==7 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==8 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==9 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==10 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==11 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==12 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==13 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==14 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==15 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel32(int localiters, int n, int matrices, int overlap, double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , double *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , double *valD8 , magma_index_t *rowD8 , 
magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , double *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10, double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11, double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12, double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13, double *valD14, magma_index_t *rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, magma_index_t *colR14, double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15, double *valD16, magma_index_t *rowD16, magma_index_t *colD16, double *valR16, magma_index_t *rowR16, magma_index_t *colR16, double *valD17, magma_index_t *rowD17, magma_index_t *colD17, double *valR17, magma_index_t *rowR17, magma_index_t *colR17, double *valD18, magma_index_t *rowD18, magma_index_t *colD18, double *valR18, magma_index_t *rowR18, magma_index_t *colR18, double *valD19, magma_index_t *rowD19, magma_index_t *colD19, double *valR19, magma_index_t *rowR19, magma_index_t *colR19, double *valD20, magma_index_t *rowD20, magma_index_t *colD20, double *valR20, magma_index_t *rowR20, magma_index_t *colR20, double *valD21, magma_index_t *rowD21, magma_index_t *colD21, double *valR21, magma_index_t *rowR21, magma_index_t *colR21, double *valD22, magma_index_t *rowD22, magma_index_t *colD22, double *valR22, magma_index_t *rowR22, magma_index_t *colR22, double *valD23, magma_index_t *rowD23, magma_index_t *colD23, double *valR23, magma_index_t *rowR23, magma_index_t *colR23, double *valD24, magma_index_t *rowD24, magma_index_t *colD24, double *valR24, magma_index_t *rowR24, magma_index_t *colR24, double *valD25, magma_index_t *rowD25, magma_index_t *colD25, double *valR25, magma_index_t *rowR25, magma_index_t *colR25, double *valD26, magma_index_t *rowD26, magma_index_t *colD26, double *valR26, magma_index_t *rowR26, magma_index_t *colR26, double *valD27, magma_index_t *rowD27, magma_index_t *colD27, double *valR27, magma_index_t *rowR27, magma_index_t *colR27, double *valD28, magma_index_t *rowD28, magma_index_t *colD28, double *valR28, magma_index_t *rowR28, magma_index_t *colR28, double *valD29, magma_index_t *rowD29, magma_index_t *colD29, double *valR29, magma_index_t *rowR29, magma_index_t *colR29, double *valD30, magma_index_t *rowD30, magma_index_t *colD30, double *valR30, magma_index_t *rowR30, magma_index_t *colR30, double *valD31, magma_index_t *rowD31, magma_index_t *colD31, double *valR31, magma_index_t *rowR31, magma_index_t *colR31, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==1 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==2 
) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==3 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==4 ) { valR = valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==5 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==6 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==7 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==8 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==9 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==10 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==11 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==12 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==13 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==14 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==15 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==16 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==17 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==18 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==19 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==20 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==21 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==22 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==23 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==24 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==25 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==26 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==27 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==28 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==29 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } 
else if ( blockIdx.x%matrices==30 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==31 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } __global__ void magma_dbajac_csr_o_ls_kernel64(int localiters, int n, int matrices, int overlap, double *valD0 , magma_index_t *rowD0 , magma_index_t *colD0 , double *valR0 , magma_index_t *rowR0 , magma_index_t *colR0 , double *valD1 , magma_index_t *rowD1 , magma_index_t *colD1 , double *valR1 , magma_index_t *rowR1 , magma_index_t *colR1 , double *valD2 , magma_index_t *rowD2 , magma_index_t *colD2 , double *valR2 , magma_index_t *rowR2 , magma_index_t *colR2 , double *valD3 , magma_index_t *rowD3 , magma_index_t *colD3 , double *valR3 , magma_index_t *rowR3 , magma_index_t *colR3 , double *valD4 , magma_index_t *rowD4 , magma_index_t *colD4 , double *valR4 , magma_index_t *rowR4 , magma_index_t *colR4 , double *valD5 , magma_index_t *rowD5 , magma_index_t *colD5 , double *valR5 , magma_index_t *rowR5 , magma_index_t *colR5 , double *valD6 , magma_index_t *rowD6 , magma_index_t *colD6 , double *valR6 , magma_index_t *rowR6 , magma_index_t *colR6 , double *valD7 , magma_index_t *rowD7 , magma_index_t *colD7 , double *valR7 , magma_index_t *rowR7 , magma_index_t *colR7 , double *valD8 , magma_index_t *rowD8 , magma_index_t *colD8 , double *valR8 , magma_index_t *rowR8 , magma_index_t *colR8 , double *valD9 , magma_index_t *rowD9 , magma_index_t *colD9 , double *valR9 , magma_index_t *rowR9 , magma_index_t *colR9 , double *valD10, magma_index_t *rowD10, magma_index_t *colD10, double *valR10, magma_index_t *rowR10, magma_index_t *colR10, double *valD11, magma_index_t *rowD11, magma_index_t *colD11, double *valR11, magma_index_t *rowR11, magma_index_t *colR11, double *valD12, magma_index_t *rowD12, magma_index_t *colD12, double *valR12, magma_index_t *rowR12, magma_index_t *colR12, double *valD13, magma_index_t *rowD13, magma_index_t *colD13, double *valR13, magma_index_t *rowR13, magma_index_t *colR13, double *valD14, magma_index_t *rowD14, magma_index_t *colD14, double *valR14, magma_index_t *rowR14, magma_index_t *colR14, double *valD15, magma_index_t *rowD15, magma_index_t *colD15, double *valR15, magma_index_t *rowR15, magma_index_t *colR15, double *valD16, magma_index_t *rowD16, magma_index_t *colD16, double *valR16, magma_index_t *rowR16, magma_index_t *colR16, double *valD17, magma_index_t *rowD17, magma_index_t *colD17, double *valR17, magma_index_t *rowR17, magma_index_t *colR17, double *valD18, magma_index_t *rowD18, magma_index_t *colD18, double *valR18, magma_index_t 
*rowR18, magma_index_t *colR18, double *valD19, magma_index_t *rowD19, magma_index_t *colD19, double *valR19, magma_index_t *rowR19, magma_index_t *colR19, double *valD20, magma_index_t *rowD20, magma_index_t *colD20, double *valR20, magma_index_t *rowR20, magma_index_t *colR20, double *valD21, magma_index_t *rowD21, magma_index_t *colD21, double *valR21, magma_index_t *rowR21, magma_index_t *colR21, double *valD22, magma_index_t *rowD22, magma_index_t *colD22, double *valR22, magma_index_t *rowR22, magma_index_t *colR22, double *valD23, magma_index_t *rowD23, magma_index_t *colD23, double *valR23, magma_index_t *rowR23, magma_index_t *colR23, double *valD24, magma_index_t *rowD24, magma_index_t *colD24, double *valR24, magma_index_t *rowR24, magma_index_t *colR24, double *valD25, magma_index_t *rowD25, magma_index_t *colD25, double *valR25, magma_index_t *rowR25, magma_index_t *colR25, double *valD26, magma_index_t *rowD26, magma_index_t *colD26, double *valR26, magma_index_t *rowR26, magma_index_t *colR26, double *valD27, magma_index_t *rowD27, magma_index_t *colD27, double *valR27, magma_index_t *rowR27, magma_index_t *colR27, double *valD28, magma_index_t *rowD28, magma_index_t *colD28, double *valR28, magma_index_t *rowR28, magma_index_t *colR28, double *valD29, magma_index_t *rowD29, magma_index_t *colD29, double *valR29, magma_index_t *rowR29, magma_index_t *colR29, double *valD30, magma_index_t *rowD30, magma_index_t *colD30, double *valR30, magma_index_t *rowR30, magma_index_t *colR30, double *valD31, magma_index_t *rowD31, magma_index_t *colD31, double *valR31, magma_index_t *rowR31, magma_index_t *colR31, double *valD32, magma_index_t *rowD32, magma_index_t *colD32, double *valR32, magma_index_t *rowR32, magma_index_t *colR32, double *valD33, magma_index_t *rowD33, magma_index_t *colD33, double *valR33, magma_index_t *rowR33, magma_index_t *colR33, double *valD34, magma_index_t *rowD34, magma_index_t *colD34, double *valR34, magma_index_t *rowR34, magma_index_t *colR34, double *valD35, magma_index_t *rowD35, magma_index_t *colD35, double *valR35, magma_index_t *rowR35, magma_index_t *colR35, double *valD36, magma_index_t *rowD36, magma_index_t *colD36, double *valR36, magma_index_t *rowR36, magma_index_t *colR36, double *valD37, magma_index_t *rowD37, magma_index_t *colD37, double *valR37, magma_index_t *rowR37, magma_index_t *colR37, double *valD38, magma_index_t *rowD38, magma_index_t *colD38, double *valR38, magma_index_t *rowR38, magma_index_t *colR38, double *valD39, magma_index_t *rowD39, magma_index_t *colD39, double *valR39, magma_index_t *rowR39, magma_index_t *colR39, double *valD40, magma_index_t *rowD40, magma_index_t *colD40, double *valR40, magma_index_t *rowR40, magma_index_t *colR40, double *valD41, magma_index_t *rowD41, magma_index_t *colD41, double *valR41, magma_index_t *rowR41, magma_index_t *colR41, double *valD42, magma_index_t *rowD42, magma_index_t *colD42, double *valR42, magma_index_t *rowR42, magma_index_t *colR42, double *valD43, magma_index_t *rowD43, magma_index_t *colD43, double *valR43, magma_index_t *rowR43, magma_index_t *colR43, double *valD44, magma_index_t *rowD44, magma_index_t *colD44, double *valR44, magma_index_t *rowR44, magma_index_t *colR44, double *valD45, magma_index_t *rowD45, magma_index_t *colD45, double *valR45, magma_index_t *rowR45, magma_index_t *colR45, double *valD46, magma_index_t *rowD46, magma_index_t *colD46, double *valR46, magma_index_t *rowR46, magma_index_t *colR46, double *valD47, magma_index_t *rowD47, 
magma_index_t *colD47, double *valR47, magma_index_t *rowR47, magma_index_t *colR47, double *valD48, magma_index_t *rowD48, magma_index_t *colD48, double *valR48, magma_index_t *rowR48, magma_index_t *colR48, double *valD49, magma_index_t *rowD49, magma_index_t *colD49, double *valR49, magma_index_t *rowR49, magma_index_t *colR49, double *valD50, magma_index_t *rowD50, magma_index_t *colD50, double *valR50, magma_index_t *rowR50, magma_index_t *colR50, double *valD51, magma_index_t *rowD51, magma_index_t *colD51, double *valR51, magma_index_t *rowR51, magma_index_t *colR51, double *valD52, magma_index_t *rowD52, magma_index_t *colD52, double *valR52, magma_index_t *rowR52, magma_index_t *colR52, double *valD53, magma_index_t *rowD53, magma_index_t *colD53, double *valR53, magma_index_t *rowR53, magma_index_t *colR53, double *valD54, magma_index_t *rowD54, magma_index_t *colD54, double *valR54, magma_index_t *rowR54, magma_index_t *colR54, double *valD55, magma_index_t *rowD55, magma_index_t *colD55, double *valR55, magma_index_t *rowR55, magma_index_t *colR55, double *valD56, magma_index_t *rowD56, magma_index_t *colD56, double *valR56, magma_index_t *rowR56, magma_index_t *colR56, double *valD57, magma_index_t *rowD57, magma_index_t *colD57, double *valR57, magma_index_t *rowR57, magma_index_t *colR57, double *valD58, magma_index_t *rowD58, magma_index_t *colD58, double *valR58, magma_index_t *rowR58, magma_index_t *colR58, double *valD59, magma_index_t *rowD59, magma_index_t *colD59, double *valR59, magma_index_t *rowR59, magma_index_t *colR59, double *valD60, magma_index_t *rowD60, magma_index_t *colD60, double *valR60, magma_index_t *rowR60, magma_index_t *colR60, double *valD61, magma_index_t *rowD61, magma_index_t *colD61, double *valR61, magma_index_t *rowR61, magma_index_t *colR61, double *valD62, magma_index_t *rowD62, magma_index_t *colD62, double *valR62, magma_index_t *rowR62, magma_index_t *colR62, double *valD63, magma_index_t *rowD63, magma_index_t *colD63, double *valR63, magma_index_t *rowR63, magma_index_t *colR63, const double * __restrict__ b, double * x ) { int inddiag = blockIdx.x*(blockDim.x - overlap) - overlap; int index = blockIdx.x*(blockDim.x - overlap) - overlap + threadIdx.x; int i, j, start, end; double zero = MAGMA_D_MAKE(0.0, 0.0); double bl, tmp = zero, v = zero; double *valR, *valD; magma_index_t *colR, *rowR, *colD, *rowD; if ( blockIdx.x%matrices==0 ) { valR = valR63; valD = valD63; colR = colR63; rowR = rowR63; colD = colD63; rowD = rowD63; } else if ( blockIdx.x%matrices==1 ) { valR = valR62; valD = valD62; colR = colR62; rowR = rowR62; colD = colD62; rowD = rowD62; } else if ( blockIdx.x%matrices==2 ) { valR = valR61; valD = valD61; colR = colR61; rowR = rowR61; colD = colD61; rowD = rowD61; } else if ( blockIdx.x%matrices==3 ) { valR = valR60; valD = valD60; colR = colR60; rowR = rowR60; colD = colD60; rowD = rowD60; } else if ( blockIdx.x%matrices==4 ) { valR = valR59; valD = valD59; colR = colR59; rowR = rowR59; colD = colD59; rowD = rowD59; } else if ( blockIdx.x%matrices==5 ) { valR = valR58; valD = valD58; colR = colR58; rowR = rowR58; colD = colD58; rowD = rowD58; } else if ( blockIdx.x%matrices==6 ) { valR = valR57; valD = valD57; colR = colR57; rowR = rowR57; colD = colD57; rowD = rowD57; } else if ( blockIdx.x%matrices==7 ) { valR = valR56; valD = valD56; colR = colR56; rowR = rowR56; colD = colD56; rowD = rowD56; } else if ( blockIdx.x%matrices==8 ) { valR = valR55; valD = valD55; colR = colR55; rowR = rowR55; colD = colD55; rowD = rowD55; 
} else if ( blockIdx.x%matrices==9 ) { valR = valR54; valD = valD54; colR = colR54; rowR = rowR54; colD = colD54; rowD = rowD54; } else if ( blockIdx.x%matrices==10 ) { valR = valR53; valD = valD53; colR = colR53; rowR = rowR53; colD = colD53; rowD = rowD53; } else if ( blockIdx.x%matrices==11 ) { valR = valR52; valD = valD52; colR = colR52; rowR = rowR52; colD = colD52; rowD = rowD52; } else if ( blockIdx.x%matrices==12 ) { valR = valR51; valD = valD51; colR = colR51; rowR = rowR51; colD = colD51; rowD = rowD51; } else if ( blockIdx.x%matrices==13 ) { valR = valR50; valD = valD50; colR = colR50; rowR = rowR50; colD = colD50; rowD = rowD50; } else if ( blockIdx.x%matrices==14 ) { valR = valR49; valD = valD49; colR = colR49; rowR = rowR49; colD = colD49; rowD = rowD49; } else if ( blockIdx.x%matrices==15 ) { valR = valR48; valD = valD48; colR = colR48; rowR = rowR48; colD = colD48; rowD = rowD48; } else if ( blockIdx.x%matrices==16 ) { valR = valR47; valD = valD47; colR = colR47; rowR = rowR47; colD = colD47; rowD = rowD47; } else if ( blockIdx.x%matrices==17 ) { valR = valR46; valD = valD46; colR = colR46; rowR = rowR46; colD = colD46; rowD = rowD46; } else if ( blockIdx.x%matrices==18 ) { valR = valR45; valD = valD45; colR = colR45; rowR = rowR45; colD = colD45; rowD = rowD45; } else if ( blockIdx.x%matrices==19 ) { valR = valR44; valD = valD44; colR = colR44; rowR = rowR44; colD = colD44; rowD = rowD44; } else if ( blockIdx.x%matrices==20 ) { valR = valR43; valD = valD43; colR = colR43; rowR = rowR43; colD = colD43; rowD = rowD43; } else if ( blockIdx.x%matrices==21 ) { valR = valR42; valD = valD42; colR = colR42; rowR = rowR42; colD = colD42; rowD = rowD42; } else if ( blockIdx.x%matrices==22 ) { valR = valR41; valD = valD41; colR = colR41; rowR = rowR41; colD = colD41; rowD = rowD41; } else if ( blockIdx.x%matrices==23 ) { valR = valR40; valD = valD40; colR = colR40; rowR = rowR40; colD = colD40; rowD = rowD40; } else if ( blockIdx.x%matrices==24 ) { valR = valR39; valD = valD39; colR = colR39; rowR = rowR39; colD = colD39; rowD = rowD39; } else if ( blockIdx.x%matrices==25 ) { valR = valR38; valD = valD38; colR = colR38; rowR = rowR38; colD = colD38; rowD = rowD38; } else if ( blockIdx.x%matrices==26 ) { valR = valR37; valD = valD37; colR = colR37; rowR = rowR37; colD = colD37; rowD = rowD37; } else if ( blockIdx.x%matrices==27 ) { valR = valR36; valD = valD36; colR = colR36; rowR = rowR36; colD = colD36; rowD = rowD36; } else if ( blockIdx.x%matrices==28 ) { valR = valR35; valD = valD35; colR = colR35; rowR = rowR35; colD = colD35; rowD = rowD35; } else if ( blockIdx.x%matrices==29 ) { valR = valR34; valD = valD34; colR = colR34; rowR = rowR34; colD = colD34; rowD = rowD34; } else if ( blockIdx.x%matrices==30 ) { valR = valR33; valD = valD33; colR = colR33; rowR = rowR33; colD = colD33; rowD = rowD33; } else if ( blockIdx.x%matrices==31 ) { valR = valR32; valD = valD32; colR = colR32; rowR = rowR32; colD = colD32; rowD = rowD32; } else if ( blockIdx.x%matrices==32 ) { valR = valR31; valD = valD31; colR = colR31; rowR = rowR31; colD = colD31; rowD = rowD31; } else if ( blockIdx.x%matrices==33 ) { valR = valR30; valD = valD30; colR = colR30; rowR = rowR30; colD = colD30; rowD = rowD30; } else if ( blockIdx.x%matrices==34 ) { valR = valR29; valD = valD29; colR = colR29; rowR = rowR29; colD = colD29; rowD = rowD29; } else if ( blockIdx.x%matrices==35 ) { valR = valR28; valD = valD28; colR = colR28; rowR = rowR28; colD = colD28; rowD = rowD28; } else if ( blockIdx.x%matrices==36 ) { valR 
= valR27; valD = valD27; colR = colR27; rowR = rowR27; colD = colD27; rowD = rowD27; } else if ( blockIdx.x%matrices==37 ) { valR = valR26; valD = valD26; colR = colR26; rowR = rowR26; colD = colD26; rowD = rowD26; } else if ( blockIdx.x%matrices==38 ) { valR = valR25; valD = valD25; colR = colR25; rowR = rowR25; colD = colD25; rowD = rowD25; } else if ( blockIdx.x%matrices==39 ) { valR = valR24; valD = valD24; colR = colR24; rowR = rowR24; colD = colD24; rowD = rowD24; } else if ( blockIdx.x%matrices==40 ) { valR = valR23; valD = valD23; colR = colR23; rowR = rowR23; colD = colD23; rowD = rowD23; } else if ( blockIdx.x%matrices==41 ) { valR = valR22; valD = valD22; colR = colR22; rowR = rowR22; colD = colD22; rowD = rowD22; } else if ( blockIdx.x%matrices==42 ) { valR = valR21; valD = valD21; colR = colR21; rowR = rowR21; colD = colD21; rowD = rowD21; } else if ( blockIdx.x%matrices==43 ) { valR = valR20; valD = valD20; colR = colR20; rowR = rowR20; colD = colD20; rowD = rowD20; } else if ( blockIdx.x%matrices==44 ) { valR = valR19; valD = valD19; colR = colR19; rowR = rowR19; colD = colD19; rowD = rowD19; } else if ( blockIdx.x%matrices==45 ) { valR = valR18; valD = valD18; colR = colR18; rowR = rowR18; colD = colD18; rowD = rowD18; } else if ( blockIdx.x%matrices==46 ) { valR = valR17; valD = valD17; colR = colR17; rowR = rowR17; colD = colD17; rowD = rowD17; } else if ( blockIdx.x%matrices==47 ) { valR = valR16; valD = valD16; colR = colR16; rowR = rowR16; colD = colD16; rowD = rowD16; } else if ( blockIdx.x%matrices==48 ) { valR = valR15; valD = valD15; colR = colR15; rowR = rowR15; colD = colD15; rowD = rowD15; } else if ( blockIdx.x%matrices==49 ) { valR = valR14; valD = valD14; colR = colR14; rowR = rowR14; colD = colD14; rowD = rowD14; } else if ( blockIdx.x%matrices==50 ) { valR = valR13; valD = valD13; colR = colR13; rowR = rowR13; colD = colD13; rowD = rowD13; } else if ( blockIdx.x%matrices==51 ) { valR = valR12; valD = valD12; colR = colR12; rowR = rowR12; colD = colD12; rowD = rowD12; } else if ( blockIdx.x%matrices==52 ) { valR = valR11; valD = valD11; colR = colR11; rowR = rowR11; colD = colD11; rowD = rowD11; } else if ( blockIdx.x%matrices==53 ) { valR = valR10; valD = valD10; colR = colR10; rowR = rowR10; colD = colD10; rowD = rowD10; } else if ( blockIdx.x%matrices==54 ) { valR = valR9; valD = valD9; colR = colR9; rowR = rowR9; colD = colD9; rowD = rowD9; } else if ( blockIdx.x%matrices==55 ) { valR = valR8; valD = valD8; colR = colR8; rowR = rowR8; colD = colD8; rowD = rowD8; } else if ( blockIdx.x%matrices==56 ) { valR = valR7; valD = valD7; colR = colR7; rowR = rowR7; colD = colD7; rowD = rowD7; } else if ( blockIdx.x%matrices==57 ) { valR = valR6; valD = valD6; colR = colR6; rowR = rowR6; colD = colD6; rowD = rowD6; } else if ( blockIdx.x%matrices==58 ) { valR = valR5; valD = valD5; colR = colR5; rowR = rowR5; colD = colD5; rowD = rowD5; } else if ( blockIdx.x%matrices==59 ) { valR = valR4; valD = valD4; colR = colR4; rowR = rowR4; colD = colD4; rowD = rowD4; } else if ( blockIdx.x%matrices==60 ) { valR = valR3; valD = valD3; colR = colR3; rowR = rowR3; colD = colD3; rowD = rowD3; } else if ( blockIdx.x%matrices==61 ) { valR = valR2; valD = valD2; colR = colR2; rowR = rowR2; colD = colD2; rowD = rowD2; } else if ( blockIdx.x%matrices==62 ) { valR = valR1; valD = valD1; colR = colR1; rowR = rowR1; colD = colD1; rowD = rowD1; } else if ( blockIdx.x%matrices==63 ) { valR = valR0; valD = valD0; colR = colR0; rowR = rowR0; colD = colD0; rowD = rowD0; } if ( index>-1 && 
index < n ) { start = rowR[index]; end = rowR[index+1]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) bl = __ldg( b+index ); #else bl = b[index]; #endif if( start != end ){ #pragma unroll for( i=start; i<end; i++ ) v += valR[i] * x[ colR[i] ]; } start = rowD[index]; end = rowD[index+1]; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * x[ colD[i] ]; v = bl - v; // add more local iterations __shared__ double local_x[ BLOCKSIZE ]; local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]); __syncthreads(); #pragma unroll for( j=0; j<localiters-1; j++ ) { tmp = zero; #pragma unroll for( i=start; i<end; i++ ) tmp += valD[i] * local_x[ colD[i] - inddiag]; local_x[threadIdx.x] += ( v - tmp) / (valD[start]); } if( threadIdx.x >= overlap ) { // only write back the lower subdomain x[index] = local_x[threadIdx.x]; } } } /** Purpose ------- This routine is a block-asynchronous Jacobi iteration with directed restricted additive Schwarz overlap (top-down) performing s local Jacobi-updates within the block. Input format is two CSR matrices, one containing the diagonal blocks, one containing the rest. Arguments --------- @param[in] localiters magma_int_t number of local Jacobi-like updates @param[in] matrices magma_int_t number of sub-matrices @param[in] overlap magma_int_t size of the overlap @param[in] D magma_d_matrix* set of matrices with diagonal blocks @param[in] R magma_d_matrix* set of matrices with non-diagonal parts @param[in] b magma_d_matrix RHS @param[in] x magma_d_matrix* iterate/solution @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dbajac_csr_overlap( magma_int_t localiters, magma_int_t matrices, magma_int_t overlap, magma_d_matrix *D, magma_d_matrix *R, magma_d_matrix b, magma_d_matrix *x, magma_queue_t queue ) { int blocksize1 = BLOCKSIZE; int blocksize2 = 1; int size = D[0].num_rows; int min_nnz=100; for(int i=0; i<matrices; i++){ min_nnz = min(min_nnz, R[i].nnz); } if ( min_nnz > -1 ){ if ( matrices == 1 ){ int dimgrid1 = magma_ceildiv( size , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_dbajac_csr_o_ls_kernel1<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, b.dval, x->dval ); } else if (matrices == 2) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_dbajac_csr_o_ls_kernel2<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, b.dval, x->dval ); //magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 4){ int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_dbajac_csr_o_ls_kernel4<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, 
D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, b.dval, x->dval ); //magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 8) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_dbajac_csr_o_ls_kernel8<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[0].dval, D[0].drow, D[0].dcol, R[0].dval, R[0].drow, R[0].dcol, D[1].dval, D[1].drow, D[1].dcol, R[1].dval, R[1].drow, R[1].dcol, D[2].dval, D[2].drow, D[2].dcol, R[2].dval, R[2].drow, R[2].dcol, D[3].dval, D[3].drow, D[3].dcol, R[3].dval, R[3].drow, R[3].dcol, D[4].dval, D[4].drow, D[4].dcol, R[4].dval, R[4].drow, R[4].dcol, D[5].dval, D[5].drow, D[5].dcol, R[5].dval, R[5].drow, R[5].dcol, D[6].dval, D[6].drow, D[6].dcol, R[6].dval, R[6].drow, R[6].dcol, D[7].dval, D[7].drow, D[7].dcol, R[7].dval, R[7].drow, R[7].dcol, b.dval, x->dval ); //magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, matrices, overlap, D, R, b.dval, x->dval ); } else if (matrices == 16) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_dbajac_csr_o_ls_kernel16<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, b.dval, x->dval ); } else if (matrices == 32) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_dbajac_csr_o_ls_kernel32<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 
4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, b.dval, x->dval ); } else if (matrices == 64) { int dimgrid1 = magma_ceildiv( size * blocksize1/(blocksize1-overlap) , blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_dbajac_csr_o_ls_kernel64<<< grid, block, 0, queue->cuda_stream() >>> ( localiters, size, matrices, overlap, D[ 0].dval, D[ 0].drow, D[ 0].dcol, R[ 0].dval, R[ 0].drow, R[ 0].dcol, D[ 1].dval, D[ 1].drow, D[ 1].dcol, R[ 1].dval, R[ 1].drow, R[ 1].dcol, D[ 2].dval, D[ 2].drow, D[ 2].dcol, R[ 2].dval, R[ 2].drow, R[ 2].dcol, D[ 3].dval, D[ 3].drow, D[ 3].dcol, R[ 3].dval, R[ 3].drow, R[ 3].dcol, D[ 4].dval, D[ 4].drow, D[ 4].dcol, R[ 4].dval, R[ 4].drow, R[ 4].dcol, D[ 5].dval, D[ 5].drow, D[ 5].dcol, R[ 5].dval, R[ 5].drow, R[ 5].dcol, D[ 6].dval, D[ 6].drow, D[ 6].dcol, R[ 6].dval, R[ 6].drow, R[ 6].dcol, D[ 7].dval, D[ 7].drow, D[ 7].dcol, R[ 7].dval, R[ 7].drow, R[ 7].dcol, D[ 8].dval, D[ 8].drow, D[ 8].dcol, R[ 8].dval, R[ 8].drow, R[ 8].dcol, D[ 9].dval, D[ 9].drow, D[ 9].dcol, R[ 9].dval, R[ 9].drow, R[ 9].dcol, D[10].dval, D[10].drow, D[10].dcol, R[10].dval, R[10].drow, R[10].dcol, D[11].dval, D[11].drow, D[11].dcol, R[11].dval, R[11].drow, R[11].dcol, D[12].dval, D[12].drow, D[12].dcol, R[12].dval, R[12].drow, R[12].dcol, D[13].dval, D[13].drow, D[13].dcol, R[13].dval, R[13].drow, R[13].dcol, D[14].dval, D[14].drow, D[14].dcol, R[14].dval, R[14].drow, R[14].dcol, D[15].dval, D[15].drow, D[15].dcol, R[15].dval, R[15].drow, R[15].dcol, D[16].dval, D[16].drow, 
D[16].dcol, R[16].dval, R[16].drow, R[16].dcol, D[17].dval, D[17].drow, D[17].dcol, R[17].dval, R[17].drow, R[17].dcol, D[18].dval, D[18].drow, D[18].dcol, R[18].dval, R[18].drow, R[18].dcol, D[19].dval, D[19].drow, D[19].dcol, R[19].dval, R[19].drow, R[19].dcol, D[20].dval, D[20].drow, D[20].dcol, R[20].dval, R[20].drow, R[20].dcol, D[21].dval, D[21].drow, D[21].dcol, R[21].dval, R[21].drow, R[21].dcol, D[22].dval, D[22].drow, D[22].dcol, R[22].dval, R[22].drow, R[22].dcol, D[23].dval, D[23].drow, D[23].dcol, R[23].dval, R[23].drow, R[23].dcol, D[24].dval, D[24].drow, D[24].dcol, R[24].dval, R[24].drow, R[24].dcol, D[25].dval, D[25].drow, D[25].dcol, R[25].dval, R[25].drow, R[25].dcol, D[26].dval, D[26].drow, D[26].dcol, R[26].dval, R[26].drow, R[26].dcol, D[27].dval, D[27].drow, D[27].dcol, R[27].dval, R[27].drow, R[27].dcol, D[28].dval, D[28].drow, D[28].dcol, R[28].dval, R[28].drow, R[28].dcol, D[29].dval, D[29].drow, D[29].dcol, R[29].dval, R[29].drow, R[29].dcol, D[30].dval, D[30].drow, D[30].dcol, R[30].dval, R[30].drow, R[30].dcol, D[31].dval, D[31].drow, D[31].dcol, R[31].dval, R[31].drow, R[31].dcol, D[32].dval, D[32].drow, D[32].dcol, R[32].dval, R[32].drow, R[32].dcol, D[33].dval, D[33].drow, D[33].dcol, R[33].dval, R[33].drow, R[33].dcol, D[34].dval, D[34].drow, D[34].dcol, R[34].dval, R[34].drow, R[34].dcol, D[35].dval, D[35].drow, D[35].dcol, R[35].dval, R[35].drow, R[35].dcol, D[36].dval, D[36].drow, D[36].dcol, R[36].dval, R[36].drow, R[36].dcol, D[37].dval, D[37].drow, D[37].dcol, R[37].dval, R[37].drow, R[37].dcol, D[38].dval, D[38].drow, D[38].dcol, R[38].dval, R[38].drow, R[38].dcol, D[39].dval, D[39].drow, D[39].dcol, R[39].dval, R[39].drow, R[39].dcol, D[40].dval, D[40].drow, D[40].dcol, R[40].dval, R[40].drow, R[40].dcol, D[41].dval, D[41].drow, D[41].dcol, R[41].dval, R[41].drow, R[41].dcol, D[42].dval, D[42].drow, D[42].dcol, R[42].dval, R[42].drow, R[42].dcol, D[43].dval, D[43].drow, D[43].dcol, R[43].dval, R[43].drow, R[43].dcol, D[44].dval, D[44].drow, D[44].dcol, R[44].dval, R[44].drow, R[44].dcol, D[45].dval, D[45].drow, D[45].dcol, R[45].dval, R[45].drow, R[45].dcol, D[46].dval, D[46].drow, D[46].dcol, R[46].dval, R[46].drow, R[46].dcol, D[47].dval, D[47].drow, D[47].dcol, R[47].dval, R[47].drow, R[47].dcol, D[48].dval, D[48].drow, D[48].dcol, R[48].dval, R[48].drow, R[48].dcol, D[49].dval, D[49].drow, D[49].dcol, R[49].dval, R[49].drow, R[49].dcol, D[50].dval, D[50].drow, D[50].dcol, R[50].dval, R[50].drow, R[50].dcol, D[51].dval, D[51].drow, D[51].dcol, R[51].dval, R[51].drow, R[51].dcol, D[52].dval, D[52].drow, D[52].dcol, R[52].dval, R[52].drow, R[52].dcol, D[53].dval, D[53].drow, D[53].dcol, R[53].dval, R[53].drow, R[53].dcol, D[54].dval, D[54].drow, D[54].dcol, R[54].dval, R[54].drow, R[54].dcol, D[55].dval, D[55].drow, D[55].dcol, R[55].dval, R[55].drow, R[55].dcol, D[56].dval, D[56].drow, D[56].dcol, R[56].dval, R[56].drow, R[56].dcol, D[57].dval, D[57].drow, D[57].dcol, R[57].dval, R[57].drow, R[57].dcol, D[58].dval, D[58].drow, D[58].dcol, R[58].dval, R[58].drow, R[58].dcol, D[59].dval, D[59].drow, D[59].dcol, R[59].dval, R[59].drow, R[59].dcol, D[60].dval, D[60].drow, D[60].dcol, R[60].dval, R[60].drow, R[60].dcol, D[61].dval, D[61].drow, D[61].dcol, R[61].dval, R[61].drow, R[61].dcol, D[62].dval, D[62].drow, D[62].dcol, R[62].dval, R[62].drow, R[62].dcol, D[63].dval, D[63].drow, D[63].dcol, R[63].dval, R[63].drow, R[63].dcol, b.dval, x->dval ); //magma_dbajac_csr_o_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>> // ( localiters, size, 
matrices, overlap, D, R, b.dval, x->dval ); } else { printf("error: invalid matrix count.\n"); } } else { printf("error: all elements in diagonal block.\n"); } return MAGMA_SUCCESS; }
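// Illustrative sketch, not part of the MAGMA sources: the overlap kernels above
// map threads to rows via
//     index = blockIdx.x * (blockDim.x - overlap) - overlap + threadIdx.x
// and only threads with threadIdx.x >= overlap write back, which is the
// top-down restricted additive Schwarz overlap described in the Purpose block.
// The toy values blockDim_x = 8, overlap = 2, n = 20 below are assumptions.
#include <cstdio>

int main() {
    const int blockDim_x = 8;    // assumed block size (the real code uses BLOCKSIZE)
    const int overlap    = 2;    // assumed overlap width
    const int n          = 20;   // assumed number of rows
    const int stride     = blockDim_x - overlap;

    for (int blockIdx_x = 0; blockIdx_x * stride - overlap < n; ++blockIdx_x) {
        int first        = blockIdx_x * stride - overlap;  // row of threadIdx.x == 0
        int last         = first + blockDim_x - 1;         // row of the last thread
        int firstWritten = first + overlap;                 // threads with threadIdx.x >= overlap
        printf("block %d reads rows [%d,%d], writes rows [%d,%d]\n",
               blockIdx_x,
               first < 0 ? 0 : first, last >= n ? n - 1 : last,
               firstWritten,          last >= n ? n - 1 : last);
    }
    return 0;
}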
c34777340a44a13e2b297d7a567b125e8a25d355.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zmgeelltmv.cu normal z -> c, Tue Sep 2 12:38:33 2014 */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif __global__ void cmgeelltmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y) { extern __shared__ magmaFloatComplex dot[]; int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row ; n ++){ int col = d_colind [ num_rows * n + row ]; magmaFloatComplex val = d_val [ num_rows * n + row ]; if( val != 0){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * d_x[col + i * num_cols ]; } } for( int i=0; i<num_vecs; i++ ) d_y[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * d_y [ row + i*num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELL. Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param num_vecs mama_int_t number of vectors @param nnz_per_row magma_int_t number of elements in the longest row @param alpha magmaFloatComplex scalar multiplier @param d_val magmaFloatComplex* array containing values of A in ELL @param d_colind magma_int_t* columnindices of A in ELL @param d_x magmaFloatComplex* input vector x @param beta magmaFloatComplex scalar multiplier @param d_y magmaFloatComplex* input/output vector y @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cmgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y ){ dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaFloatComplex ); // num_vecs vectors hipLaunchKernelGGL(( cmgeelltmv_kernel), dim3(grid), dim3(BLOCK_SIZE), MEM_SIZE , 0, m, n, num_vecs, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y ); return MAGMA_SUCCESS; }
c34777340a44a13e2b297d7a567b125e8a25d355.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zmgeelltmv.cu normal z -> c, Tue Sep 2 12:38:33 2014 */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif __global__ void cmgeelltmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y) { extern __shared__ magmaFloatComplex dot[]; int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_C_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row ; n ++){ int col = d_colind [ num_rows * n + row ]; magmaFloatComplex val = d_val [ num_rows * n + row ]; if( val != 0){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * d_x[col + i * num_cols ]; } } for( int i=0; i<num_vecs; i++ ) d_y[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * d_y [ row + i*num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELL. Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param num_vecs mama_int_t number of vectors @param nnz_per_row magma_int_t number of elements in the longest row @param alpha magmaFloatComplex scalar multiplier @param d_val magmaFloatComplex* array containing values of A in ELL @param d_colind magma_int_t* columnindices of A in ELL @param d_x magmaFloatComplex* input vector x @param beta magmaFloatComplex scalar multiplier @param d_y magmaFloatComplex* input/output vector y @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cmgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y ){ dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaFloatComplex ); // num_vecs vectors cmgeelltmv_kernel<<< grid, BLOCK_SIZE, MEM_SIZE >>> ( m, n, num_vecs, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y ); return MAGMA_SUCCESS; }
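// Illustrative sketch, not part of the MAGMA sources: cmgeelltmv_kernel above
// reads entry n of a row at d_val[num_rows * n + row] (with the matching column
// index in d_colind), i.e. a column-major ELL layout padded to num_cols_per_row
// entries per row. The 3x3 toy matrix below is an assumption used only to show
// how values land in that layout.
#include <cstdio>

int main() {
    const int num_rows = 3, nnz_per_row = 2;      // assumed toy sizes
    // A = [ 1 0 2 ; 0 3 0 ; 4 0 5 ]; padded slots use value 0 and column 0
    float       val[num_rows * nnz_per_row];
    int         colind[num_rows * nnz_per_row];
    const float rowvals[3][2] = { {1.f, 2.f}, {3.f, 0.f}, {4.f, 5.f} };
    const int   rowcols[3][2] = { {0, 2},     {1, 0},     {0, 2}     };

    for (int n = 0; n < nnz_per_row; ++n)
        for (int r = 0; r < num_rows; ++r) {
            val[num_rows * n + r]    = rowvals[r][n];   // same indexing as the kernel
            colind[num_rows * n + r] = rowcols[r][n];
        }

    for (int i = 0; i < num_rows * nnz_per_row; ++i)
        printf("slot %d: col %d, val %g\n", i, colind[i], val[i]);
    return 0;
}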
7d1053132fc2e381a577a622dab6a715b68be6d1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

extern "C"
__global__ void cuAdd(int n, float *a, float *b, float *result)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        result[i] = a[i] + b[i];
    }
}

extern "C"
__global__ void cuMult(int n, float *a, float *b, float *result)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        result[i] = a[i] * b[i];
    }
}

extern "C"
__global__ void cuDiv(int n, float *a, float *b, float *result)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        result[i] = a[i] / b[i];
    }
}

extern "C"
__global__ void cuExp(int n, float *a, float *result)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        result[i] = expf(a[i]);
    }
}
7d1053132fc2e381a577a622dab6a715b68be6d1.cu
extern "C" __global__ void cuAdd(int n, float *a, float *b, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { result[i] = a[i] + b[i]; } } extern "C" __global__ void cuMult(int n, float *a, float *b, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { result[i] = a[i] * b[i]; } } extern "C" __global__ void cuDiv(int n, float *a, float *b, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { result[i] = a[i] / b[i]; } } extern "C" __global__ void cuExp(int n, float *a, float *result) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { result[i] = expf(a[i]); } }
66a7f1d9c2b459f0399b150c65b4c00d0007feac.hip
// !!! This is a file automatically generated by hipify!!!
/*
    Matrix Multiplication w/GPU (cuda)
    Víctor Rendón Suárez
    A01022462
*/
#include <hip/hip_runtime.h>
#include <chrono>
#include "common.h"
using namespace std;

#define SIZE 4000

void initialize_matrix(int *matrix, int n)
{
    for (int i = 0; i < n * n; i++)
        matrix[i] = i;
}

__global__ void multiply_matrix_cuda(int *matrixA, int *matrixB, long *result, int n)
{
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = blockIdx.y;
    if (ix < n && iy < n) {
        long add = 0;
        for (int i = 0; i < n; i++) {
            add += matrixA[iy * n + i] * matrixB[i * n + ix];
        }
        result[iy * n + ix] = add;
    }
}

int main(int argc, char const *argv[])
{
    // Setup device
    int dev = 0;
    hipDeviceProp_t deviceProp;
    hipGetDeviceProperties(&deviceProp, dev);
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    hipSetDevice(dev);

    // Specify size
    int n = SIZE;
    int bytes = n * n * sizeof(int);
    int lngBytes = n * n * sizeof(long);

    // Matrix definition
    int *matrixA = (int *) malloc(bytes);
    int *matrixB = (int *) malloc(bytes);
    long *result = (long *) malloc(lngBytes);
    int *d_matrixA;
    int *d_matrixB;
    long *d_result_matrix;

    // Initialize matrices
    initialize_matrix(matrixA, n);
    initialize_matrix(matrixB, n);

    // Allocate device memory
    hipMalloc((void **)&d_matrixA, bytes);
    hipMalloc((void **)&d_matrixB, bytes);
    hipMalloc((void **)&d_result_matrix, lngBytes);

    // Transfer data from host to device
    hipMemcpy(d_matrixA, matrixA, bytes, hipMemcpyHostToDevice);
    hipMemcpy(d_matrixB, matrixB, bytes, hipMemcpyHostToDevice);

    // Kernel configuration
    int dimx = 512;
    dim3 block(dimx, 1);
    dim3 grid((n + block.x - 1) / block.x, n);

    // Multiply the matrices using GPU, measure elapsed time
    auto start_time = chrono::high_resolution_clock::now();
    hipLaunchKernelGGL((multiply_matrix_cuda), dim3(grid), dim3(block), 0, 0,
                       d_matrixA, d_matrixB, d_result_matrix, n);
    hipDeviceSynchronize();
    auto end_time = chrono::high_resolution_clock::now();
    chrono::duration<float, std::milli> duration_ms = end_time - start_time;
    printf("Matrix multiplication on GPU, time elapsed: %f ms\n", duration_ms.count());

    // Copy result to host
    hipMemcpy(result, d_result_matrix, lngBytes, hipMemcpyDeviceToHost);

    // Free allocated memory
    hipFree(d_matrixA);
    hipFree(d_matrixB);
    hipFree(d_result_matrix);
    free(matrixA);
    free(matrixB);
    free(result);
    hipDeviceReset();
    return 0;
}
66a7f1d9c2b459f0399b150c65b4c00d0007feac.cu
/* Matrix Multiplication w/GPU (cuda) Víctor Rendón Suárez A01022462 */ #include <cuda_runtime.h> #include <chrono> #include "common.h" using namespace std; #define SIZE 4000 void initialize_matrix(int *matrix, int n) { for (int i = 0; i < n * n; i++) matrix[i] = i; } __global__ void multiply_matrix_cuda(int *matrixA, int *matrixB, long *result, int n) { int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = blockIdx.y; if(ix < n && iy < n) { long add = 0; for (int i = 0; i < n; i++) { add += matrixA[iy * n + i] * matrixB[i * n + ix]; } result[iy * n + ix] = add; } } int main(int argc, char const *argv[]) { // Setup device int dev = 0; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); printf("Using Device %d: %s\n", dev, deviceProp.name); cudaSetDevice(dev); // Specify size int n = SIZE; int bytes = n * n * sizeof(int); int lngBytes = n * n * sizeof(long); // Matrix definition int *matrixA = (int *) malloc(bytes); int *matrixB = (int *) malloc(bytes); long *result = (long *) malloc(lngBytes); int *d_matrixA; int *d_matrixB; long *d_result_matrix; // Initialize matrices initialize_matrix(matrixA, n); initialize_matrix(matrixB, n); // Allocate device memory cudaMalloc((void **)&d_matrixA, bytes); cudaMalloc((void **)&d_matrixB, bytes); cudaMalloc((void **)&d_result_matrix, lngBytes); // Transfer data from host to device cudaMemcpy(d_matrixA, matrixA, bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_matrixB, matrixB, bytes, cudaMemcpyHostToDevice); // Kernel configuration int dimx = 512; dim3 block(dimx, 1); dim3 grid((n + block.x - 1) / block.x, n); // Multiply the matrices using GPU, measure elapsed time auto start_time = chrono::high_resolution_clock::now(); multiply_matrix_cuda<<<grid, block>>>(d_matrixA, d_matrixB, d_result_matrix, n); cudaDeviceSynchronize(); auto end_time = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms = end_time - start_time; printf("Matrix multiplication on GPU, time elapsed: %f ms\n", duration_ms.count()); // Copy result to host cudaMemcpy(result, d_result_matrix, lngBytes, cudaMemcpyDeviceToHost); // Free allocated memory cudaFree(d_matrixA); cudaFree(d_matrixB); cudaFree(d_result_matrix); free(matrixA); free(matrixB); free(result); cudaDeviceReset(); return 0; }
ba395602e1bc34ef1f3a3210ec159fc7ab124c68.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from ziteric.cu normal z -> s, Fri Sep 11 18:29:42 2015 */ #include "common_magmasparse.h" #define PRECISION_s __global__ void magma_siteric_csr_kernel( magma_int_t n, magma_int_t nnz, magma_index_t *Arowidx, magma_index_t *Acolidx, const float * __restrict__ A_val, magma_index_t *rowptr, magma_index_t *colidx, float *val ) { int i, j; int k = (blockDim.x * blockIdx.x + threadIdx.x); // % nnz; float zero = MAGMA_S_MAKE(0.0, 0.0); float s, sp; int il, iu, jl, ju; if ( k < nnz ) { i = Arowidx[k]; j = Acolidx[k]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) s = __ldg( A_val+k ); #else s = A_val[k]; #endif il = rowptr[i]; iu = rowptr[j]; while (il < rowptr[i+1] && iu < rowptr[j+1]) { sp = zero; jl = colidx[il]; ju = colidx[iu]; if (jl < ju) il++; else if (ju < jl) iu++; else { // we are going to modify this u entry sp = val[il] * val[iu]; s -= sp; il++; iu++; } } // undo the last operation (it must be the last) s += sp; __syncthreads(); // modify entry if (i == j) val[il-1] = MAGMA_S_MAKE( sqrt( fabs( MAGMA_S_REAL(s) )), 0.0 ); else val[il-1] = s / val[iu-1]; } }// kernel /** Purpose ------- This routine iteratively computes an incomplete Cholesky factorization. The idea is according to Edmond Chow's presentation at SIAM 2014. This routine was used in the ISC 2015 paper: E. Chow et al.: 'Study of an Asynchronous Iterative Algorithm for Computing Incomplete Factorizations on GPUs' The input format of the initial guess matrix A is Magma_CSRCOO, A_CSR is CSR or CSRCOO format. Arguments --------- @param[in] A magma_s_matrix input matrix A - initial guess (lower triangular) @param[in,out] A_CSR magma_s_matrix input/output matrix containing the IC approximation @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_siteric_csr( magma_s_matrix A, magma_s_matrix A_CSR, magma_queue_t queue ) { int blocksize1 = 128; int blocksize2 = 1; int dimgrid1 = magma_ceildiv( A.nnz, blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; // Runtime API // hipFuncCachePreferShared: shared memory is 48 KB // hipFuncCachePreferEqual: shared memory is 32 KB // hipFuncCachePreferL1: shared memory is 16 KB // hipFuncCachePreferNone: no preference //hipFuncSetCacheConfig(hipFuncCachePreferShared); hipDeviceSetCacheConfig( hipFuncCachePreferL1 ); dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); hipLaunchKernelGGL(( magma_siteric_csr_kernel), dim3(grid), dim3(block), 0, magma_stream , A.num_rows, A.nnz, A.rowidx, A.col, A.val, A_CSR.row, A_CSR.col, A_CSR.val ); return MAGMA_SUCCESS; }
ba395602e1bc34ef1f3a3210ec159fc7ab124c68.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from ziteric.cu normal z -> s, Fri Sep 11 18:29:42 2015 */ #include "common_magmasparse.h" #define PRECISION_s __global__ void magma_siteric_csr_kernel( magma_int_t n, magma_int_t nnz, magma_index_t *Arowidx, magma_index_t *Acolidx, const float * __restrict__ A_val, magma_index_t *rowptr, magma_index_t *colidx, float *val ) { int i, j; int k = (blockDim.x * blockIdx.x + threadIdx.x); // % nnz; float zero = MAGMA_S_MAKE(0.0, 0.0); float s, sp; int il, iu, jl, ju; if ( k < nnz ) { i = Arowidx[k]; j = Acolidx[k]; #if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s)) s = __ldg( A_val+k ); #else s = A_val[k]; #endif il = rowptr[i]; iu = rowptr[j]; while (il < rowptr[i+1] && iu < rowptr[j+1]) { sp = zero; jl = colidx[il]; ju = colidx[iu]; if (jl < ju) il++; else if (ju < jl) iu++; else { // we are going to modify this u entry sp = val[il] * val[iu]; s -= sp; il++; iu++; } } // undo the last operation (it must be the last) s += sp; __syncthreads(); // modify entry if (i == j) val[il-1] = MAGMA_S_MAKE( sqrt( fabs( MAGMA_S_REAL(s) )), 0.0 ); else val[il-1] = s / val[iu-1]; } }// kernel /** Purpose ------- This routine iteratively computes an incomplete Cholesky factorization. The idea is according to Edmond Chow's presentation at SIAM 2014. This routine was used in the ISC 2015 paper: E. Chow et al.: 'Study of an Asynchronous Iterative Algorithm for Computing Incomplete Factorizations on GPUs' The input format of the initial guess matrix A is Magma_CSRCOO, A_CSR is CSR or CSRCOO format. Arguments --------- @param[in] A magma_s_matrix input matrix A - initial guess (lower triangular) @param[in,out] A_CSR magma_s_matrix input/output matrix containing the IC approximation @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_siteric_csr( magma_s_matrix A, magma_s_matrix A_CSR, magma_queue_t queue ) { int blocksize1 = 128; int blocksize2 = 1; int dimgrid1 = magma_ceildiv( A.nnz, blocksize1 ); int dimgrid2 = 1; int dimgrid3 = 1; // Runtime API // cudaFuncCachePreferShared: shared memory is 48 KB // cudaFuncCachePreferEqual: shared memory is 32 KB // cudaFuncCachePreferL1: shared memory is 16 KB // cudaFuncCachePreferNone: no preference //cudaFuncSetCacheConfig(cudaFuncCachePreferShared); cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 ); dim3 grid( dimgrid1, dimgrid2, dimgrid3 ); dim3 block( blocksize1, blocksize2, 1 ); magma_siteric_csr_kernel<<< grid, block, 0, magma_stream >>> ( A.num_rows, A.nnz, A.rowidx, A.col, A.val, A_CSR.row, A_CSR.col, A_CSR.val ); return MAGMA_SUCCESS; }
d187dfb486d28285fd6d16585c91c100d4986e03.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2009-2016 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: mphoward /*! \file LoadBalancerGPU.cu \brief Implementation the GPU functions for load balancing */ #ifdef ENABLE_MPI #include "LoadBalancerGPU.cuh" #include "hoomd/extern/hipcub/hipcub.hpp" //! Mark the particles that are off rank /*! * \param d_ranks The current rank of each particle * \param d_pos Particle positions * \param d_cart_ranks Map from Cartesian coordinates to rank number * \param rank_pos Cartesian coordinates of current rank * \param box Local box * \param di Domain indexer * \param N Number of local particles * * Using a thread per particle, the current rank of each particle is computed assuming that a particle cannot migrate * more than a single rank in any direction. The Cartesian rank of the particle is computed, and mapped back to a physical * rank. */ __global__ void gpu_load_balance_mark_rank_kernel(unsigned int *d_ranks, const Scalar4 *d_pos, const unsigned int *d_cart_ranks, const uint3 rank_pos, const BoxDim box, const Index3D di, const unsigned int N) { // particle index const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; // one thread per particle if (idx >= N) return; const Scalar4 postype = d_pos[idx]; const Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z); const Scalar3 f = box.makeFraction(pos); int3 grid_pos = make_int3(rank_pos.x, rank_pos.y, rank_pos.z); if (f.x >= Scalar(1.0)) ++grid_pos.x; if (f.x < Scalar(0.0)) --grid_pos.x; if (f.y >= Scalar(1.0)) ++grid_pos.y; if (f.y < Scalar(0.0)) --grid_pos.y; if (f.z >= Scalar(1.0)) ++grid_pos.z; if (f.z < Scalar(0.0)) --grid_pos.z; if (grid_pos.x == (int)di.getW()) grid_pos.x = 0; else if (grid_pos.x < 0) grid_pos.x += di.getW(); if (grid_pos.y == (int)di.getH()) grid_pos.y = 0; else if (grid_pos.y < 0) grid_pos.y += di.getH(); if (grid_pos.z == (int)di.getD()) grid_pos.z = 0; else if (grid_pos.z < 0) grid_pos.z += di.getD(); const unsigned int cur_rank = d_cart_ranks[di(grid_pos.x,grid_pos.y,grid_pos.z)]; d_ranks[idx] = cur_rank; } /*! * \param d_ranks The current rank of each particle * \param d_pos Particle positions * \param d_cart_ranks Map from Cartesian coordinates to rank number * \param rank_pos Cartesian coordinates of current rank * \param box Local box * \param di Domain indexer * \param N Number of local particles * \param block_size Kernel launch block size * * This simply a kernel driver, see gpu_load_balance_mark_rank_kernel for details. */ void gpu_load_balance_mark_rank(unsigned int *d_ranks, const Scalar4 *d_pos, const unsigned int *d_cart_ranks, const uint3 rank_pos, const BoxDim& box, const Index3D& di, const unsigned int N, const unsigned int block_size) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void *)gpu_load_balance_mark_rank_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); unsigned int n_blocks = N/run_block_size + 1; hipLaunchKernelGGL(( gpu_load_balance_mark_rank_kernel), dim3(n_blocks), dim3(run_block_size), 0, 0, d_ranks, d_pos, d_cart_ranks, rank_pos, box, di, N); } //! 
Functor for selecting ranks not equal to the current rank struct NotEqual { unsigned int not_eq_val; //!< Value to test if not equal to __host__ __device__ __forceinline__ NotEqual(unsigned int _not_eq_val) : not_eq_val(_not_eq_val) {} __host__ __device__ __forceinline__ bool operator()(const unsigned int &a) const { return (a != not_eq_val); } }; /*! * \param d_off_rank (Reduced) list of particles that are off the current rank * \param d_n_select Number of particles that are off the current rank * \param d_ranks The current rank of each particle * \param d_tmp_storage Temporary storage array, or NULL * \param tmp_storage_bytes Size of temporary storage, or 0 * \param N Number of local particles * \param cur_rank Current rank index * * This function uses the CUB DeviceSelect::If primitive to select particles that are off rank using the NotEqual * functor. As is usual, this function must be called twice in order to perform the selection. If \a d_tmp_storage * is NULL, the temporary storage requirement is computed and saved in \a tmp_storage_bytes. This is externally * allocated from the CachedAllocator. When called the second time, the ranks of the particles not on the current * rank are saved in \a d_off_rank, and the number of these particles is saved in \a d_n_select. */ void gpu_load_balance_select_off_rank(unsigned int *d_off_rank, unsigned int *d_n_select, unsigned int *d_ranks, void *d_tmp_storage, size_t &tmp_storage_bytes, const unsigned int N, const unsigned int cur_rank) { // final precaution against calling with an empty array if (N == 0) return; NotEqual select_op(cur_rank); hipcub::DeviceSelect::If(d_tmp_storage, tmp_storage_bytes, d_ranks, d_off_rank, d_n_select, N, select_op); } #endif // ENABLE_MPI
d187dfb486d28285fd6d16585c91c100d4986e03.cu
// Copyright (c) 2009-2016 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: mphoward /*! \file LoadBalancerGPU.cu \brief Implementation the GPU functions for load balancing */ #ifdef ENABLE_MPI #include "LoadBalancerGPU.cuh" #include "hoomd/extern/cub/cub.cuh" //! Mark the particles that are off rank /*! * \param d_ranks The current rank of each particle * \param d_pos Particle positions * \param d_cart_ranks Map from Cartesian coordinates to rank number * \param rank_pos Cartesian coordinates of current rank * \param box Local box * \param di Domain indexer * \param N Number of local particles * * Using a thread per particle, the current rank of each particle is computed assuming that a particle cannot migrate * more than a single rank in any direction. The Cartesian rank of the particle is computed, and mapped back to a physical * rank. */ __global__ void gpu_load_balance_mark_rank_kernel(unsigned int *d_ranks, const Scalar4 *d_pos, const unsigned int *d_cart_ranks, const uint3 rank_pos, const BoxDim box, const Index3D di, const unsigned int N) { // particle index const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; // one thread per particle if (idx >= N) return; const Scalar4 postype = d_pos[idx]; const Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z); const Scalar3 f = box.makeFraction(pos); int3 grid_pos = make_int3(rank_pos.x, rank_pos.y, rank_pos.z); if (f.x >= Scalar(1.0)) ++grid_pos.x; if (f.x < Scalar(0.0)) --grid_pos.x; if (f.y >= Scalar(1.0)) ++grid_pos.y; if (f.y < Scalar(0.0)) --grid_pos.y; if (f.z >= Scalar(1.0)) ++grid_pos.z; if (f.z < Scalar(0.0)) --grid_pos.z; if (grid_pos.x == (int)di.getW()) grid_pos.x = 0; else if (grid_pos.x < 0) grid_pos.x += di.getW(); if (grid_pos.y == (int)di.getH()) grid_pos.y = 0; else if (grid_pos.y < 0) grid_pos.y += di.getH(); if (grid_pos.z == (int)di.getD()) grid_pos.z = 0; else if (grid_pos.z < 0) grid_pos.z += di.getD(); const unsigned int cur_rank = d_cart_ranks[di(grid_pos.x,grid_pos.y,grid_pos.z)]; d_ranks[idx] = cur_rank; } /*! * \param d_ranks The current rank of each particle * \param d_pos Particle positions * \param d_cart_ranks Map from Cartesian coordinates to rank number * \param rank_pos Cartesian coordinates of current rank * \param box Local box * \param di Domain indexer * \param N Number of local particles * \param block_size Kernel launch block size * * This simply a kernel driver, see gpu_load_balance_mark_rank_kernel for details. */ void gpu_load_balance_mark_rank(unsigned int *d_ranks, const Scalar4 *d_pos, const unsigned int *d_cart_ranks, const uint3 rank_pos, const BoxDim& box, const Index3D& di, const unsigned int N, const unsigned int block_size) { static unsigned int max_block_size = UINT_MAX; if (max_block_size == UINT_MAX) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, (const void *)gpu_load_balance_mark_rank_kernel); max_block_size = attr.maxThreadsPerBlock; } unsigned int run_block_size = min(block_size, max_block_size); unsigned int n_blocks = N/run_block_size + 1; gpu_load_balance_mark_rank_kernel<<<n_blocks, run_block_size>>>(d_ranks, d_pos, d_cart_ranks, rank_pos, box, di, N); } //! 
Functor for selecting ranks not equal to the current rank struct NotEqual { unsigned int not_eq_val; //!< Value to test if not equal to __host__ __device__ __forceinline__ NotEqual(unsigned int _not_eq_val) : not_eq_val(_not_eq_val) {} __host__ __device__ __forceinline__ bool operator()(const unsigned int &a) const { return (a != not_eq_val); } }; /*! * \param d_off_rank (Reduced) list of particles that are off the current rank * \param d_n_select Number of particles that are off the current rank * \param d_ranks The current rank of each particle * \param d_tmp_storage Temporary storage array, or NULL * \param tmp_storage_bytes Size of temporary storage, or 0 * \param N Number of local particles * \param cur_rank Current rank index * * This function uses the CUB DeviceSelect::If primitive to select particles that are off rank using the NotEqual * functor. As is usual, this function must be called twice in order to perform the selection. If \a d_tmp_storage * is NULL, the temporary storage requirement is computed and saved in \a tmp_storage_bytes. This is externally * allocated from the CachedAllocator. When called the second time, the ranks of the particles not on the current * rank are saved in \a d_off_rank, and the number of these particles is saved in \a d_n_select. */ void gpu_load_balance_select_off_rank(unsigned int *d_off_rank, unsigned int *d_n_select, unsigned int *d_ranks, void *d_tmp_storage, size_t &tmp_storage_bytes, const unsigned int N, const unsigned int cur_rank) { // final precaution against calling with an empty array if (N == 0) return; NotEqual select_op(cur_rank); cub::DeviceSelect::If(d_tmp_storage, tmp_storage_bytes, d_ranks, d_off_rank, d_n_select, N, select_op); } #endif // ENABLE_MPI
a5ec48dda333e300c971bf5258d32afa56def808.hip
// !!! This is a file automatically generated by hipify!!! /* * Université Pierre et Marie Curie * Calcul de transport de neutrons * Version séquentielle */ #include <hip/hip_runtime.h> #include "neutron.h" #include <utility> #include <chrono> #include <iostream> #include "neutron_cuda_kernel.h" using namespace std::chrono; /** * Retourne le quotient entier superieur ou egal a "a/b". */ template<typename T> inline static T iDivUp(T a, T b){ return ((a % b != 0) ? (a / b + 1) : (a / b)); } ExperimentalResults neutron_cuda_caller(float* absorbed, long n, const ProblemParameters& params, const std::vector<unsigned long long>& seeds, int threadsPerBlock, int neutronsPerThread) { const auto threads = threadsPerBlock*iDivUp<long>(n, threadsPerBlock*neutronsPerThread); auto t1 = system_clock::now(); unsigned long long* d_seeds; hipMalloc((void**)&d_seeds, seeds.size()*sizeof(unsigned long long)); hipMemcpy(d_seeds, seeds.data(), seeds.size()*sizeof(unsigned long long), hipMemcpyHostToDevice); // launching cuda kernel ProblemParameters* d_params; hipMalloc((void**)&d_params, sizeof(ProblemParameters)); hipMemcpy(d_params, &params, sizeof(ProblemParameters), hipMemcpyHostToDevice); unsigned long long int* d_next_absorbed; hipMalloc((void**)&d_next_absorbed, sizeof(unsigned long long int)); hipMemset(d_next_absorbed, 0, sizeof(unsigned long long int)); float* d_absorbed; hipMalloc((void**)&d_absorbed, n*sizeof(float)); #ifdef TEST hipMemcpy(d_absorbed, absorbed, n*sizeof(float), hipMemcpyHostToDevice); #endif unsigned long long int* d_r, * d_b, * d_t; hipMalloc((void**)&d_r, sizeof(unsigned long long int)); hipMalloc((void**)&d_b, sizeof(unsigned long long int)); hipMalloc((void**)&d_t, sizeof(unsigned long long int)); hipMemset(d_r, 0, sizeof(unsigned long long int)); hipMemset(d_b, 0, sizeof(unsigned long long int)); hipMemset(d_t, 0, sizeof(unsigned long long int)); auto t2 = system_clock::now(); std::cout << "Temps de la copie CPU -> GPU: " << std::chrono::duration_cast<milliseconds>(t2 - t1).count()/1000. << " sec" << std::endl; const dim3 nthreads(threadsPerBlock); const dim3 nblocs(iDivUp<long>(n, threadsPerBlock*neutronsPerThread)); std::cout << "Nombre de blocs GPU: " << nblocs.x << std::endl; std::cout << "Nombre de threads par bloc: " << nthreads.x << std::endl; std::cout << "Mémoire utilisée: " << (n*4.)/(1024.*1024.) << "Mo" << std::endl; t1 = system_clock::now(); hipLaunchKernelGGL(( neutron_cuda_kernel), dim3(nthreads), dim3(nblocs), 0, 0, n, neutronsPerThread, d_params, d_next_absorbed, d_absorbed, d_r, d_b, d_t, d_seeds); // retrieving results hipDeviceSynchronize(); t2 = system_clock::now(); std::cout << "Temps du kernel: " << std::chrono::duration_cast<milliseconds>(t2 - t1).count()/1000. << " sec" << std::endl; ExperimentalResults res; unsigned long long int r, b, t; hipMemcpy(&r, d_r, sizeof(unsigned long long int), hipMemcpyDeviceToHost); hipMemcpy(&b, d_b, sizeof(unsigned long long int), hipMemcpyDeviceToHost); hipMemcpy(&t, d_t, sizeof(unsigned long long int), hipMemcpyDeviceToHost); res.r = static_cast<long>(r); res.b = static_cast<long>(b); res.t = static_cast<long>(t); t1 = system_clock::now(); res.absorbed = absorbed; hipMemcpy(res.absorbed, d_absorbed, res.b*sizeof(float), hipMemcpyDeviceToHost); t2 = system_clock::now(); std::cout << "Temps de la copie GPU -> CPU: " << std::chrono::duration_cast<milliseconds>(t2 - t1).count()/1000. << " sec" << std::endl; hipDeviceReset(); // hipFree(*) return res; }
a5ec48dda333e300c971bf5258d32afa56def808.cu
/* * Université Pierre et Marie Curie * Calcul de transport de neutrons * Version séquentielle */ #include <cuda.h> #include "neutron.h" #include <utility> #include <chrono> #include <iostream> #include "neutron_cuda_kernel.h" using namespace std::chrono; /** * Retourne le quotient entier superieur ou egal a "a/b". */ template<typename T> inline static T iDivUp(T a, T b){ return ((a % b != 0) ? (a / b + 1) : (a / b)); } ExperimentalResults neutron_cuda_caller(float* absorbed, long n, const ProblemParameters& params, const std::vector<unsigned long long>& seeds, int threadsPerBlock, int neutronsPerThread) { const auto threads = threadsPerBlock*iDivUp<long>(n, threadsPerBlock*neutronsPerThread); auto t1 = system_clock::now(); unsigned long long* d_seeds; cudaMalloc((void**)&d_seeds, seeds.size()*sizeof(unsigned long long)); cudaMemcpy(d_seeds, seeds.data(), seeds.size()*sizeof(unsigned long long), cudaMemcpyHostToDevice); // launching cuda kernel ProblemParameters* d_params; cudaMalloc((void**)&d_params, sizeof(ProblemParameters)); cudaMemcpy(d_params, &params, sizeof(ProblemParameters), cudaMemcpyHostToDevice); unsigned long long int* d_next_absorbed; cudaMalloc((void**)&d_next_absorbed, sizeof(unsigned long long int)); cudaMemset(d_next_absorbed, 0, sizeof(unsigned long long int)); float* d_absorbed; cudaMalloc((void**)&d_absorbed, n*sizeof(float)); #ifdef TEST cudaMemcpy(d_absorbed, absorbed, n*sizeof(float), cudaMemcpyHostToDevice); #endif unsigned long long int* d_r, * d_b, * d_t; cudaMalloc((void**)&d_r, sizeof(unsigned long long int)); cudaMalloc((void**)&d_b, sizeof(unsigned long long int)); cudaMalloc((void**)&d_t, sizeof(unsigned long long int)); cudaMemset(d_r, 0, sizeof(unsigned long long int)); cudaMemset(d_b, 0, sizeof(unsigned long long int)); cudaMemset(d_t, 0, sizeof(unsigned long long int)); auto t2 = system_clock::now(); std::cout << "Temps de la copie CPU -> GPU: " << std::chrono::duration_cast<milliseconds>(t2 - t1).count()/1000. << " sec" << std::endl; const dim3 nthreads(threadsPerBlock); const dim3 nblocs(iDivUp<long>(n, threadsPerBlock*neutronsPerThread)); std::cout << "Nombre de blocs GPU: " << nblocs.x << std::endl; std::cout << "Nombre de threads par bloc: " << nthreads.x << std::endl; std::cout << "Mémoire utilisée: " << (n*4.)/(1024.*1024.) << "Mo" << std::endl; t1 = system_clock::now(); neutron_cuda_kernel<<<nthreads, nblocs>>>(n, neutronsPerThread, d_params, d_next_absorbed, d_absorbed, d_r, d_b, d_t, d_seeds); // retrieving results cudaDeviceSynchronize(); t2 = system_clock::now(); std::cout << "Temps du kernel: " << std::chrono::duration_cast<milliseconds>(t2 - t1).count()/1000. << " sec" << std::endl; ExperimentalResults res; unsigned long long int r, b, t; cudaMemcpy(&r, d_r, sizeof(unsigned long long int), cudaMemcpyDeviceToHost); cudaMemcpy(&b, d_b, sizeof(unsigned long long int), cudaMemcpyDeviceToHost); cudaMemcpy(&t, d_t, sizeof(unsigned long long int), cudaMemcpyDeviceToHost); res.r = static_cast<long>(r); res.b = static_cast<long>(b); res.t = static_cast<long>(t); t1 = system_clock::now(); res.absorbed = absorbed; cudaMemcpy(res.absorbed, d_absorbed, res.b*sizeof(float), cudaMemcpyDeviceToHost); t2 = system_clock::now(); std::cout << "Temps de la copie GPU -> CPU: " << std::chrono::duration_cast<milliseconds>(t2 - t1).count()/1000. << " sec" << std::endl; cudaDeviceReset(); // cudaFree(*) return res; }
8de4765213dbf489739cd4f62c835ab546345fdb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2008 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. 
*/ // includes, system #include <stdio.h> #include <assert.h> // Simple utility function to check for CUDA runtime errors void checkCUDAError(const char* msg); // Part 1 of 1: implement the kernel __global__ void reverseArrayBlock( ) { } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { // pointer for host memory and size int *h_a; int dimA = 256; // pointer for device memory int *d_b, *d_a; // define grid and block size int numBlocks = 1; int numThreadsPerBlock = dimA; // allocate host and device memory size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int); h_a = (int *) malloc(memSize); hipMalloc( (void **) &d_a, memSize ); hipMalloc( (void **) &d_b, memSize ); // Initialize input array on host for (int i = 0; i < dimA; ++i) { h_a[i] = i; } // Copy host array to device array hipMemcpy( d_a, h_a, memSize, hipMemcpyHostToDevice ); // launch kernel dim3 dimGrid(numBlocks); dim3 dimBlock(numThreadsPerBlock); hipLaunchKernelGGL(( reverseArrayBlock), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_b, d_a ); // block until the device has completed hipDeviceSynchronize(); // check if kernel execution generated an error // Check for any CUDA errors checkCUDAError("kernel invocation"); // device to host copy hipMemcpy( h_a, d_b, memSize, hipMemcpyDeviceToHost ); // Check for any CUDA errors checkCUDAError("memcpy"); // verify the data returned to the host is correct for (int i = 0; i < dimA; i++) { assert(h_a[i] == dimA - 1 - i ); } // free device memory hipFree(d_a); hipFree(d_b); // free host memory free(h_a); // If the program makes it this far, then the results are correct and // there are no run-time errors. Good work! printf("Correct!\n"); return 0; } void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } }
8de4765213dbf489739cd4f62c835ab546345fdb.cu
/* * Copyright 1993-2008 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ // includes, system #include <stdio.h> #include <assert.h> // Simple utility function to check for CUDA runtime errors void checkCUDAError(const char* msg); // Part 1 of 1: implement the kernel __global__ void reverseArrayBlock( ) { } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { // pointer for host memory and size int *h_a; int dimA = 256; // pointer for device memory int *d_b, *d_a; // define grid and block size int numBlocks = 1; int numThreadsPerBlock = dimA; // allocate host and device memory size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int); h_a = (int *) malloc(memSize); cudaMalloc( (void **) &d_a, memSize ); cudaMalloc( (void **) &d_b, memSize ); // Initialize input array on host for (int i = 0; i < dimA; ++i) { h_a[i] = i; } // Copy host array to device array cudaMemcpy( d_a, h_a, memSize, cudaMemcpyHostToDevice ); // launch kernel dim3 dimGrid(numBlocks); dim3 dimBlock(numThreadsPerBlock); reverseArrayBlock<<< dimGrid, dimBlock >>>( d_b, d_a ); // block until the device has completed cudaThreadSynchronize(); // check if kernel execution generated an error // Check for any CUDA errors checkCUDAError("kernel invocation"); // device to host copy cudaMemcpy( h_a, d_b, memSize, cudaMemcpyDeviceToHost ); // Check for any CUDA errors checkCUDAError("memcpy"); // verify the data returned to the host is correct for (int i = 0; i < dimA; i++) { assert(h_a[i] == dimA - 1 - i ); } // free device memory cudaFree(d_a); cudaFree(d_b); // free host memory free(h_a); // If the program makes it this far, then the results are correct and // there are no run-time errors. 
Good work! printf("Correct!\n"); return 0; } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } }
908269a67af6779bc5445a54fa9de2e94def8350.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void RadixHistogram_device( int *dptrHistogram, const int *in, size_t N, int shift, int mask ) { for ( int i = blockIdx.x*blockDim.x+threadIdx.x; i < N; i += blockDim.x*gridDim.x ) { int index = (in[i] & mask) >> shift; atomicAdd( dptrHistogram+index, 1 ); } #if 0 const int cBuckets = 1<<b; __shared__ unsigned char sharedHistogram[NUM_THREADS][cBuckets]; for ( int i = blockIdx.x*blockDim.x+threadIdx.x; i < N; i += blockDim.x*gridDim.x ) { int index = (in[i] & mask) >> shift; if ( 0 == ++sharedHistogram[threadIdx.x][index] ) { atomicAdd( dptrHistogram+index, 256 ); } } __syncthreads(); for ( int i = 0; i < cBuckets; i++ ) { if ( sharedHistogram[threadIdx.x][i] ) { atomicAdd( dptrHistogram+i, sharedHistogram[threadIdx.x][i] ); } } #endif }
908269a67af6779bc5445a54fa9de2e94def8350.cu
#include "includes.h" __global__ void RadixHistogram_device( int *dptrHistogram, const int *in, size_t N, int shift, int mask ) { for ( int i = blockIdx.x*blockDim.x+threadIdx.x; i < N; i += blockDim.x*gridDim.x ) { int index = (in[i] & mask) >> shift; atomicAdd( dptrHistogram+index, 1 ); } #if 0 const int cBuckets = 1<<b; __shared__ unsigned char sharedHistogram[NUM_THREADS][cBuckets]; for ( int i = blockIdx.x*blockDim.x+threadIdx.x; i < N; i += blockDim.x*gridDim.x ) { int index = (in[i] & mask) >> shift; if ( 0 == ++sharedHistogram[threadIdx.x][index] ) { atomicAdd( dptrHistogram+index, 256 ); } } __syncthreads(); for ( int i = 0; i < cBuckets; i++ ) { if ( sharedHistogram[threadIdx.x][i] ) { atomicAdd( dptrHistogram+i, sharedHistogram[threadIdx.x][i] ); } } #endif }
f7bbaae56b33c3d79f64adb370840dd9af481711.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Samson Wang. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> // For small input computation template <typename T, int FixedKernelSize> __global__ void DepthWiseConv2dSmallFForward(const T* bottom_data, const T* weight_data, const T* bias_data, const int channels, const int padding, const int height, const int width, const int in_kernel_size, const int out_height, const int out_width, const int batch_size, const bool forward, T* top_data) { const int in_num = height * width; const int out_num = out_height * out_width; const int pad_height = height + padding*2; const int pad_width = width + padding*2; const int pad_num = pad_height * pad_width; const int kernel_size = FixedKernelSize > 0 ? FixedKernelSize : in_kernel_size; const int kernel_num = kernel_size * kernel_size; const int thread_num = blockDim.x * blockDim.y; const int n_steps = blockDim.z * gridDim.z; const int out_num_total = n_steps * out_num; const int in_num_total = n_steps * in_num; const int tidz = threadIdx.z + blockDim.z * blockIdx.z; const int tidx = blockDim.x * threadIdx.y + threadIdx.x; __shared__ T w_shared[16*16]; __shared__ T tmp_shared[4*16*16]; // Initialize tmp shared for input data for (int off = threadIdx.z * thread_num + tidx; off < 4 * 256; off += thread_num) { tmp_shared[off] = T(0); } T bias = T(0); // if (bias_data != NULL) bias = bias_data[c]; __syncthreads(); const int bound = batch_size * channels; const int pidx = pad_width * (threadIdx.y + padding) + threadIdx.x + padding; const int opidx = pad_width * threadIdx.y + threadIdx.x; int tmp_p_off = threadIdx.z * pad_num; int tmp_w_off = threadIdx.z * kernel_num; int tmp_off = width * threadIdx.y + threadIdx.x + tidz * in_num; int tmp_out_off = threadIdx.y * out_width + threadIdx.x + tidz * out_num; int half_pad_off = pad_width * blockDim.y; int half_in_off = width * blockDim.y; int half_out_off = out_width * blockDim.y; for (int n_off = 0; n_off < bound; n_off += n_steps) { int n_z = n_off + tidz; int c = n_z % channels; int c_off = c * kernel_num; if (n_z < bound) { // Load kernels into shared memory for (int off = tidx; off < kernel_num; off += thread_num) { if (forward) { w_shared[tmp_w_off + off] = weight_data[c_off + off]; } else { w_shared[tmp_w_off + off] = weight_data[c_off - off + kernel_num - 1]; } } // Load input data input shared memory, pay attention to the padding. 
if (threadIdx.x < width && threadIdx.y < height) { tmp_shared[tmp_p_off + pidx] = bottom_data[tmp_off]; if ((threadIdx.y + blockDim.y < height)) { tmp_shared[tmp_p_off + pidx + half_pad_off] = bottom_data[tmp_off + half_in_off]; } } } __syncthreads(); /* if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) { for (int i = 0; i < kernel_size; i++) { for (int j = 0; j < kernel_size; j++) { printf("%f ", w_shared[i * kernel_size + j]); } printf("\n"); } for (int i = 0; i < pad_height; i++) { for (int j = 0; j < pad_width; j++) { printf("%f ", tmp_shared[i * pad_width + j]); } printf("\n"); } printf("blockdim %d, %d, %d", blockDim.x, blockDim.y, blockDim.z); } */ if (n_z < bound && threadIdx.x < out_width && threadIdx.y < out_height) { // To do the math T sum = T(0); T sum1 = T(0); int i_poff = tmp_p_off + opidx; #pragma unroll for (int i = 0; i < kernel_num; i+= kernel_size) { #pragma unroll for (int j = 0; j < kernel_size; j++) { const T f = w_shared[i + tmp_w_off + j]; sum += tmp_shared[i_poff + j] * f; if ((threadIdx.y + blockDim.y < out_height)) { sum1 += tmp_shared[i_poff + j + half_pad_off] * f; } } i_poff += pad_width; } // sum += bias; top_data[tmp_out_off] = sum; if ((threadIdx.y + blockDim.y < out_height)) { top_data[tmp_out_off + half_out_off] = sum1; //printf("top data %d, %d, %d, %d\n", threadIdx.x, threadIdx.y, tmp_out_off, half_out_off); } } tmp_off += in_num_total; tmp_out_off += out_num_total; __syncthreads(); } } template <typename T> __global__ void DepthWiseConv2dFForward(const T* bottom_data, const T* weight_data, const T* bias_data, const int channels, const int padding, const int height, const int width, const int kernel_size, const int out_height, const int out_width, const int output_size, const bool forward, T* top_data) { int tidx = threadIdx.y * blockDim.x + threadIdx.x; int o_idx = blockIdx.x * (blockDim.x - kernel_size + 1) + threadIdx.x; int o_idy = blockIdx.y * (blockDim.y - kernel_size + 1) + threadIdx.y; int c = (blockIdx.z) % channels; T bias = 0; if (bias_data != NULL) { bias = bias_data[c]; } int kernel_num = kernel_size * kernel_size; __shared__ T w_shared[32]; if (tidx < kernel_num) { if (forward) { w_shared[tidx] = weight_data[c * kernel_num + tidx]; } else { w_shared[tidx] = weight_data[c * kernel_num + kernel_num - 1 - tidx]; } } __syncthreads(); __shared__ T tmp_shared[32*32]; for (int n_off = 0; n_off < output_size; n_off += gridDim.z) { if (blockIdx.z + n_off < output_size) { T sum = 0; //int n = blockIdx.z / channels; // int i_off_x = threadIdx.x - padding; // int i_off_y = threadIdx.y - padding; if (o_idx - padding >= 0 && o_idx - padding < width && o_idy - padding >=0 && o_idy - padding < height) { tmp_shared[threadIdx.y * blockDim.x + threadIdx.x] = bottom_data[(blockIdx.z + n_off) * width * height + (o_idy - padding) * width + o_idx - padding]; // printf("tids %d, %d, oid %d, %d, padding %d, width %d, height %d, block %d, %d\n", tidx, tidy, o_idx, o_idy, padding, width, height, blockDim.x, blockDim.y); } else { tmp_shared[threadIdx.y * blockDim.x + threadIdx.x] = 0; } __syncthreads(); // std::cout << tidx << " " << tidy << " " << " o " << o_idx << " " << o_idy << " padding " << padding << " " << width << std::endl; if (o_idx >= 0 && o_idx < out_width && o_idy >=0 && o_idy < out_height && threadIdx.x < blockDim.x - kernel_size + 1 && threadIdx.y < blockDim.y - kernel_size + 1) { for (int i = 0; i < kernel_size; i++) { for (int j = 0; j < kernel_size; j++) { sum += tmp_shared[(threadIdx.y + i) * blockDim.x + threadIdx.x + j] * w_shared[i 
* kernel_size + j]; } } top_data[(n_off + blockIdx.z) * out_width * out_height + (o_idy ) * out_width + o_idx ] = sum + bias; } } else { // printf("blockDim %d, %d, %d. gridDim %d, %d, %d os %d z %d off %d ch %d\n", blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z, output_size, blockIdx.z, n_off, channels); } __syncthreads(); } } template <typename T> __global__ void DepthWiseConv2dLargeFForward(const T* bottom_data, const T* weight_data, const T* bias_data, const int channels, const int padding, const int height, const int width, const int kernel_size, const int out_height, const int out_width, const int batch_size, T* top_data) { __shared__ T true_r_shared[32*32]; int n_idx = blockIdx.x * blockDim.y + threadIdx.y; const int n_num = gridDim.x * blockDim.y; while (n_idx < channels * batch_size) { T* r_shared = true_r_shared; T sum[8] = {0}; T tmp = 0; const int c = n_idx % channels; int valid_kernel_w = kernel_size; int valid_kernel_h = kernel_size; T* data = const_cast<T*> (bottom_data); data = data + n_idx * width * height; T* weight = const_cast<T*> (weight_data); weight = weight + c * kernel_size * kernel_size; const int y_shift = blockIdx.y - padding; const int x_shift = blockIdx.z - padding; if (blockIdx.y < padding) { valid_kernel_h = valid_kernel_h + y_shift; weight = weight - y_shift * kernel_size; } else if (blockIdx.y >= out_height - padding) { valid_kernel_h = valid_kernel_h - (blockIdx.y - out_height + padding + 1); data = data + y_shift * width; } else { data = data + y_shift * width; } if (blockIdx.z < padding) { valid_kernel_w = valid_kernel_w + x_shift; weight = weight - x_shift; } else if (blockIdx.z >= out_width - padding) { valid_kernel_w = valid_kernel_w - (blockIdx.z - out_width + padding + 1); data = data + x_shift; } else { data = data + x_shift; } const int y_num = (valid_kernel_h / 8) * 8; r_shared = r_shared + threadIdx.y * blockDim.x; for (int tidx = threadIdx.x; tidx < valid_kernel_w; tidx += blockDim.x) { int tmp_tidx_d = tidx; int tmp_tidx_w = tidx; for (int tidy = 0; tidy < y_num; tidy += 8) { #pragma unroll for (int j = 0; j < 8; j++) { sum[j] += data[j * width + tmp_tidx_d] * weight[j * kernel_size + tmp_tidx_w]; } tmp_tidx_d = tmp_tidx_d + 8 * width; tmp_tidx_w = tmp_tidx_w + 8 * kernel_size; } for (int j = 0; j < valid_kernel_h - y_num; j++) { sum[j] += data[j * width + tmp_tidx_d] * weight[j * kernel_size + tmp_tidx_w]; } } #pragma unroll for (int j = 0; j < 8; j++) { tmp += sum[j]; } r_shared[threadIdx.x] = tmp; __syncthreads(); if (threadIdx.x < 32) { for (int j = 32 + threadIdx.x; j < blockDim.x; j += 32) { tmp += r_shared[j]; } r_shared[threadIdx.x] = tmp; } __syncthreads(); if (threadIdx.x == 0) { tmp = r_shared[0]; for (int j = 1; j < 32; j++) { tmp += r_shared[j]; } top_data[n_idx * out_width * out_height + blockIdx.y * out_width + blockIdx.z] = tmp; } __syncthreads(); n_idx += n_num; } } at::Tensor DepthWiseConv2d_forward_cuda(const at::Tensor& input, const at::Tensor& weight, const at::Tensor& bias, const int stride, const int padding, const int dilation, const int groups) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); auto batch_size = input.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto kernel_size = weight.size(2); auto out_height = (height - kernel_size + 1 + padding * 2) / stride; auto out_width = (width - kernel_size + 1 + padding * 2) / stride; AT_ASSERTM(weight.size(0) == channels, "Weight output channel must be equal to Input channel"); auto 
output = at::empty({batch_size, channels, out_height, out_width}, input.options()); auto blockdim = 32; if (out_width < kernel_size && out_width + kernel_size - 1 < 32) { blockdim = kernel_size; } else if (out_width + kernel_size - 1 < 32) { blockdim = out_width + kernel_size - 1; } auto blocks_x = THCCeilDiv((long)out_width, blockdim-kernel_size+1L); auto blocks_y = THCCeilDiv((long)out_height, blockdim-kernel_size+1L); auto output_size = batch_size * channels; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto znum = output_size; if (znum > 2048) { znum = ::max((2048 / channels) * channels, channels); } if (kernel_size > 16) { int blocks_x = kernel_size <= 1024 ? kernel_size : 1024; int blocks_y = (1024) / blocks_x; //dim3 grid((channels * batch_size + blocks_y - 1) / blocks_y, out_height, out_width); dim3 grid((channels * batch_size) / blocks_y / 2, out_height, out_width); dim3 block(blocks_x, blocks_y); AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2d_forward", [&] { hipLaunchKernelGGL(( DepthWiseConv2dLargeFForward<scalar_t>), dim3(grid), dim3(block), 0, stream, input.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, padding, height, width, kernel_size, out_height, out_width, batch_size, output.data<scalar_t>()); }); THCudaCheck(hipGetLastError()); } else if (width + 2*padding > 16 || height + 2 * padding> 16) { dim3 grid(blocks_x, blocks_y, znum); dim3 block(blockdim, blockdim); // std::cout << "SHAPE dim x " << blocks_x << " dim y " << blocks_y << " nc " << batch_size * channels << std::endl; // std::cout << channels << " " << padding << " " << height << " " << width << " " << kernel_size << std::endl; //printf("blockdim %d, %d, %d, griddim %d, %d, %d outputsize %d\n", block.x, block.y, block.z, grid.x, grid.y, grid.z, output_size); //if (output.numel() == 0) { // THCudaCheck(hipGetLastError()); // return output; //} //niu // printf("blockdim %d, %d, %d, griddim %d, %d, %d outputsize %d, channels %d, width %d, height %d, padding %d, stride %d, bias %s, kernel_size %d\n", block.x, block.y, block.z, grid.x, grid.y, grid.z, batch_size, channels, width, height, padding, stride, bias.size(0), kernel_size); AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2d_forward", [&] { hipLaunchKernelGGL(( DepthWiseConv2dFForward<scalar_t>), dim3(grid), dim3(block), 0, stream, input.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, padding, height, width, kernel_size, out_height, out_width, output_size, true, output.data<scalar_t>()); }); THCudaCheck(hipGetLastError()); } else { auto dimy = THCCeilDiv((long)(height > out_height ? height : out_height), 2L); auto blocks_x = 1; auto blocks_y = 1; dim3 grid(blocks_x, blocks_y, THCCeilDiv((long)channels*batch_size, 64L)); dim3 block(width > out_width ? 
width : out_width, dimy, 8); // printf("Small blockdim %d, %d, %d, griddim %d, %d, %d outputsize %d, channels %d, width %d, height %d, padding %d, stride %d, bias %s, kernel_size %d\n", block.x, block.y, block.z, grid.x, grid.y, grid.z, batch_size, channels, width, height, padding, stride, bias.size(0), kernel_size); if (kernel_size == 3) { AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2dSmall_forward", [&] { hipLaunchKernelGGL(( DepthWiseConv2dSmallFForward<scalar_t, 3>), dim3(grid), dim3(block), 0, stream, input.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, padding, height, width, kernel_size, out_height, out_width, batch_size, true, output.data<scalar_t>()); }); } else if (kernel_size == 5) { AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2dSmall_forward", [&] { hipLaunchKernelGGL(( DepthWiseConv2dSmallFForward<scalar_t, 5>), dim3(grid), dim3(block), 0, stream, input.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, padding, height, width, kernel_size, out_height, out_width, batch_size, true, output.data<scalar_t>()); }); } else { AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2dSmall_forward", [&] { hipLaunchKernelGGL(( DepthWiseConv2dSmallFForward<scalar_t, 0>), dim3(grid), dim3(block), 0, stream, input.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, padding, height, width, kernel_size, out_height, out_width, batch_size, true, output.data<scalar_t>()); }); } THCudaCheck(hipGetLastError()); } return output; } std::vector<at::Tensor> DepthWiseConv2d_backward_cuda(const at::Tensor& grad, const at::Tensor& input, const at::Tensor& weight, const at::Tensor& bias, const int stride, const int padding, const int dilation, const int groups) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); auto batch_size = input.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto kernel_size = weight.size(2); auto out_height = (height - kernel_size + 1 + padding * 2) / stride; auto out_width = (width - kernel_size + 1 + padding * 2) / stride; AT_ASSERTM(weight.size(0) == channels, "Weight output channel must be equal to Input channel"); // To deal with input grad computation. 
auto grad_input = at::empty({batch_size, channels, height, width}, grad.options()); auto grad_weight = at::empty({channels, 1, kernel_size, kernel_size}, grad.options()); auto grad_bias = at::empty({bias.size(0)}, grad.options()); auto blockdim = 32; auto bwd_padding = kernel_size - 1 - padding; auto bwd_s = 1; std::cout << out_width << "x" << out_height << " Grad " << grad.size(2) << "x" << grad.size(3) << std::endl; std::cout << grad.size(3) - kernel_size + 1 + bwd_padding * 2 << " bwd " << bwd_padding << std::endl; AT_ASSERTM(width == (grad.size(3) - kernel_size + 1 + bwd_padding * 2), "grad_input computed size should be equal to input size") if (width < kernel_size && width + kernel_size - 1 < 32) { blockdim = kernel_size; } else if (width + kernel_size - 1 < 32) { blockdim = width + kernel_size - 1; } auto blocks_x = THCCeilDiv((long)width, blockdim-kernel_size+1L); auto blocks_y = THCCeilDiv((long)height, blockdim-kernel_size+1L); auto output_size = batch_size * channels; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto znum = output_size; if (znum > 2048) { znum = ::max((2048 / channels) * channels, channels); } if (out_width + 2*padding > 16 || out_height + 2 * padding> 16) { dim3 grid(blocks_x, blocks_y, znum); dim3 block(blockdim, blockdim); AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2d_forward", [&] { hipLaunchKernelGGL(( DepthWiseConv2dFForward<scalar_t>), dim3(grid), dim3(block), 0, stream, grad.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, bwd_padding, out_height, out_width, kernel_size, height, width, output_size, false, grad_input.data<scalar_t>()); }); THCudaCheck(hipGetLastError()); } else { auto dimy = THCCeilDiv((long)(out_height > height ? out_height : height), 2L); auto blocks_x = 1; auto blocks_y = 1; dim3 grid(blocks_x, blocks_y, THCCeilDiv((long)channels*batch_size, 64L)); dim3 block(out_width > width ? 
out_width : width, dimy, 8); if (kernel_size == 3) { AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2dSmall_forward", [&] { hipLaunchKernelGGL(( DepthWiseConv2dSmallFForward<scalar_t, 3>), dim3(grid), dim3(block), 0, stream, grad.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, bwd_padding, out_height, out_width, kernel_size, height, width, batch_size, false, grad_input.data<scalar_t>()); }); std::cout << "3 small" << std::endl; } else if (kernel_size == 5) { AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2dSmall_forward", [&] { hipLaunchKernelGGL(( DepthWiseConv2dSmallFForward<scalar_t, 5>), dim3(grid), dim3(block), 0, stream, grad.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, bwd_padding, out_height, out_width, kernel_size, height, width, batch_size, false, grad_input.data<scalar_t>()); }); std::cout << "5 small" << std::endl; } else { AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2dSmall_forward", [&] { hipLaunchKernelGGL(( DepthWiseConv2dSmallFForward<scalar_t, 0>), dim3(grid), dim3(block), 0, stream, grad.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, bwd_padding, out_height, out_width, kernel_size, height, width, batch_size, false, grad_input.data<scalar_t>()); }); std::cout << "Common small" << std::endl; //printf("<%d, %d, %d>\nGrid <%d, %d, %d>\nshape %d, %d, %d, %d\n", block.x, block.y, block.z, grid.x, grid.y, grid.z, width, height, out_width, out_height); } THCudaCheck(hipGetLastError()); } //std::cout << "before return" << std::endl << out_width << std::endl << padding << std::endl << out_height << std::endl << width << std::endl; return std::vector<at::Tensor> {grad_input, grad_weight, grad_bias}; }
f7bbaae56b33c3d79f64adb370840dd9af481711.cu
// Copyright (c) Samson Wang. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> // For small input computation template <typename T, int FixedKernelSize> __global__ void DepthWiseConv2dSmallFForward(const T* bottom_data, const T* weight_data, const T* bias_data, const int channels, const int padding, const int height, const int width, const int in_kernel_size, const int out_height, const int out_width, const int batch_size, const bool forward, T* top_data) { const int in_num = height * width; const int out_num = out_height * out_width; const int pad_height = height + padding*2; const int pad_width = width + padding*2; const int pad_num = pad_height * pad_width; const int kernel_size = FixedKernelSize > 0 ? FixedKernelSize : in_kernel_size; const int kernel_num = kernel_size * kernel_size; const int thread_num = blockDim.x * blockDim.y; const int n_steps = blockDim.z * gridDim.z; const int out_num_total = n_steps * out_num; const int in_num_total = n_steps * in_num; const int tidz = threadIdx.z + blockDim.z * blockIdx.z; const int tidx = blockDim.x * threadIdx.y + threadIdx.x; __shared__ T w_shared[16*16]; __shared__ T tmp_shared[4*16*16]; // Initialize tmp shared for input data for (int off = threadIdx.z * thread_num + tidx; off < 4 * 256; off += thread_num) { tmp_shared[off] = T(0); } T bias = T(0); // if (bias_data != NULL) bias = bias_data[c]; __syncthreads(); const int bound = batch_size * channels; const int pidx = pad_width * (threadIdx.y + padding) + threadIdx.x + padding; const int opidx = pad_width * threadIdx.y + threadIdx.x; int tmp_p_off = threadIdx.z * pad_num; int tmp_w_off = threadIdx.z * kernel_num; int tmp_off = width * threadIdx.y + threadIdx.x + tidz * in_num; int tmp_out_off = threadIdx.y * out_width + threadIdx.x + tidz * out_num; int half_pad_off = pad_width * blockDim.y; int half_in_off = width * blockDim.y; int half_out_off = out_width * blockDim.y; for (int n_off = 0; n_off < bound; n_off += n_steps) { int n_z = n_off + tidz; int c = n_z % channels; int c_off = c * kernel_num; if (n_z < bound) { // Load kernels into shared memory for (int off = tidx; off < kernel_num; off += thread_num) { if (forward) { w_shared[tmp_w_off + off] = weight_data[c_off + off]; } else { w_shared[tmp_w_off + off] = weight_data[c_off - off + kernel_num - 1]; } } // Load input data input shared memory, pay attention to the padding. 
if (threadIdx.x < width && threadIdx.y < height) { tmp_shared[tmp_p_off + pidx] = bottom_data[tmp_off]; if ((threadIdx.y + blockDim.y < height)) { tmp_shared[tmp_p_off + pidx + half_pad_off] = bottom_data[tmp_off + half_in_off]; } } } __syncthreads(); /* if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0) { for (int i = 0; i < kernel_size; i++) { for (int j = 0; j < kernel_size; j++) { printf("%f ", w_shared[i * kernel_size + j]); } printf("\n"); } for (int i = 0; i < pad_height; i++) { for (int j = 0; j < pad_width; j++) { printf("%f ", tmp_shared[i * pad_width + j]); } printf("\n"); } printf("blockdim %d, %d, %d", blockDim.x, blockDim.y, blockDim.z); } */ if (n_z < bound && threadIdx.x < out_width && threadIdx.y < out_height) { // To do the math T sum = T(0); T sum1 = T(0); int i_poff = tmp_p_off + opidx; #pragma unroll for (int i = 0; i < kernel_num; i+= kernel_size) { #pragma unroll for (int j = 0; j < kernel_size; j++) { const T f = w_shared[i + tmp_w_off + j]; sum += tmp_shared[i_poff + j] * f; if ((threadIdx.y + blockDim.y < out_height)) { sum1 += tmp_shared[i_poff + j + half_pad_off] * f; } } i_poff += pad_width; } // sum += bias; top_data[tmp_out_off] = sum; if ((threadIdx.y + blockDim.y < out_height)) { top_data[tmp_out_off + half_out_off] = sum1; //printf("top data %d, %d, %d, %d\n", threadIdx.x, threadIdx.y, tmp_out_off, half_out_off); } } tmp_off += in_num_total; tmp_out_off += out_num_total; __syncthreads(); } } template <typename T> __global__ void DepthWiseConv2dFForward(const T* bottom_data, const T* weight_data, const T* bias_data, const int channels, const int padding, const int height, const int width, const int kernel_size, const int out_height, const int out_width, const int output_size, const bool forward, T* top_data) { int tidx = threadIdx.y * blockDim.x + threadIdx.x; int o_idx = blockIdx.x * (blockDim.x - kernel_size + 1) + threadIdx.x; int o_idy = blockIdx.y * (blockDim.y - kernel_size + 1) + threadIdx.y; int c = (blockIdx.z) % channels; T bias = 0; if (bias_data != NULL) { bias = bias_data[c]; } int kernel_num = kernel_size * kernel_size; __shared__ T w_shared[32]; if (tidx < kernel_num) { if (forward) { w_shared[tidx] = weight_data[c * kernel_num + tidx]; } else { w_shared[tidx] = weight_data[c * kernel_num + kernel_num - 1 - tidx]; } } __syncthreads(); __shared__ T tmp_shared[32*32]; for (int n_off = 0; n_off < output_size; n_off += gridDim.z) { if (blockIdx.z + n_off < output_size) { T sum = 0; //int n = blockIdx.z / channels; // int i_off_x = threadIdx.x - padding; // int i_off_y = threadIdx.y - padding; if (o_idx - padding >= 0 && o_idx - padding < width && o_idy - padding >=0 && o_idy - padding < height) { tmp_shared[threadIdx.y * blockDim.x + threadIdx.x] = bottom_data[(blockIdx.z + n_off) * width * height + (o_idy - padding) * width + o_idx - padding]; // printf("tids %d, %d, oid %d, %d, padding %d, width %d, height %d, block %d, %d\n", tidx, tidy, o_idx, o_idy, padding, width, height, blockDim.x, blockDim.y); } else { tmp_shared[threadIdx.y * blockDim.x + threadIdx.x] = 0; } __syncthreads(); // std::cout << tidx << " " << tidy << " " << " o " << o_idx << " " << o_idy << " padding " << padding << " " << width << std::endl; if (o_idx >= 0 && o_idx < out_width && o_idy >=0 && o_idy < out_height && threadIdx.x < blockDim.x - kernel_size + 1 && threadIdx.y < blockDim.y - kernel_size + 1) { for (int i = 0; i < kernel_size; i++) { for (int j = 0; j < kernel_size; j++) { sum += tmp_shared[(threadIdx.y + i) * blockDim.x + threadIdx.x + j] * w_shared[i 
* kernel_size + j]; } } top_data[(n_off + blockIdx.z) * out_width * out_height + (o_idy ) * out_width + o_idx ] = sum + bias; } } else { // printf("blockDim %d, %d, %d. gridDim %d, %d, %d os %d z %d off %d ch %d\n", blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z, output_size, blockIdx.z, n_off, channels); } __syncthreads(); } } template <typename T> __global__ void DepthWiseConv2dLargeFForward(const T* bottom_data, const T* weight_data, const T* bias_data, const int channels, const int padding, const int height, const int width, const int kernel_size, const int out_height, const int out_width, const int batch_size, T* top_data) { __shared__ T true_r_shared[32*32]; int n_idx = blockIdx.x * blockDim.y + threadIdx.y; const int n_num = gridDim.x * blockDim.y; while (n_idx < channels * batch_size) { T* r_shared = true_r_shared; T sum[8] = {0}; T tmp = 0; const int c = n_idx % channels; int valid_kernel_w = kernel_size; int valid_kernel_h = kernel_size; T* data = const_cast<T*> (bottom_data); data = data + n_idx * width * height; T* weight = const_cast<T*> (weight_data); weight = weight + c * kernel_size * kernel_size; const int y_shift = blockIdx.y - padding; const int x_shift = blockIdx.z - padding; if (blockIdx.y < padding) { valid_kernel_h = valid_kernel_h + y_shift; weight = weight - y_shift * kernel_size; } else if (blockIdx.y >= out_height - padding) { valid_kernel_h = valid_kernel_h - (blockIdx.y - out_height + padding + 1); data = data + y_shift * width; } else { data = data + y_shift * width; } if (blockIdx.z < padding) { valid_kernel_w = valid_kernel_w + x_shift; weight = weight - x_shift; } else if (blockIdx.z >= out_width - padding) { valid_kernel_w = valid_kernel_w - (blockIdx.z - out_width + padding + 1); data = data + x_shift; } else { data = data + x_shift; } const int y_num = (valid_kernel_h / 8) * 8; r_shared = r_shared + threadIdx.y * blockDim.x; for (int tidx = threadIdx.x; tidx < valid_kernel_w; tidx += blockDim.x) { int tmp_tidx_d = tidx; int tmp_tidx_w = tidx; for (int tidy = 0; tidy < y_num; tidy += 8) { #pragma unroll for (int j = 0; j < 8; j++) { sum[j] += data[j * width + tmp_tidx_d] * weight[j * kernel_size + tmp_tidx_w]; } tmp_tidx_d = tmp_tidx_d + 8 * width; tmp_tidx_w = tmp_tidx_w + 8 * kernel_size; } for (int j = 0; j < valid_kernel_h - y_num; j++) { sum[j] += data[j * width + tmp_tidx_d] * weight[j * kernel_size + tmp_tidx_w]; } } #pragma unroll for (int j = 0; j < 8; j++) { tmp += sum[j]; } r_shared[threadIdx.x] = tmp; __syncthreads(); if (threadIdx.x < 32) { for (int j = 32 + threadIdx.x; j < blockDim.x; j += 32) { tmp += r_shared[j]; } r_shared[threadIdx.x] = tmp; } __syncthreads(); if (threadIdx.x == 0) { tmp = r_shared[0]; for (int j = 1; j < 32; j++) { tmp += r_shared[j]; } top_data[n_idx * out_width * out_height + blockIdx.y * out_width + blockIdx.z] = tmp; } __syncthreads(); n_idx += n_num; } } at::Tensor DepthWiseConv2d_forward_cuda(const at::Tensor& input, const at::Tensor& weight, const at::Tensor& bias, const int stride, const int padding, const int dilation, const int groups) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); auto batch_size = input.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto kernel_size = weight.size(2); auto out_height = (height - kernel_size + 1 + padding * 2) / stride; auto out_width = (width - kernel_size + 1 + padding * 2) / stride; AT_ASSERTM(weight.size(0) == channels, "Weight output channel must be equal to Input channel"); auto 
output = at::empty({batch_size, channels, out_height, out_width}, input.options()); auto blockdim = 32; if (out_width < kernel_size && out_width + kernel_size - 1 < 32) { blockdim = kernel_size; } else if (out_width + kernel_size - 1 < 32) { blockdim = out_width + kernel_size - 1; } auto blocks_x = THCCeilDiv((long)out_width, blockdim-kernel_size+1L); auto blocks_y = THCCeilDiv((long)out_height, blockdim-kernel_size+1L); auto output_size = batch_size * channels; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto znum = output_size; if (znum > 2048) { znum = std::max((2048 / channels) * channels, channels); } if (kernel_size > 16) { int blocks_x = kernel_size <= 1024 ? kernel_size : 1024; int blocks_y = (1024) / blocks_x; //dim3 grid((channels * batch_size + blocks_y - 1) / blocks_y, out_height, out_width); dim3 grid((channels * batch_size) / blocks_y / 2, out_height, out_width); dim3 block(blocks_x, blocks_y); AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2d_forward", [&] { DepthWiseConv2dLargeFForward<scalar_t><<<grid, block, 0, stream>>>( input.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, padding, height, width, kernel_size, out_height, out_width, batch_size, output.data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); } else if (width + 2*padding > 16 || height + 2 * padding> 16) { dim3 grid(blocks_x, blocks_y, znum); dim3 block(blockdim, blockdim); // std::cout << "SHAPE dim x " << blocks_x << " dim y " << blocks_y << " nc " << batch_size * channels << std::endl; // std::cout << channels << " " << padding << " " << height << " " << width << " " << kernel_size << std::endl; //printf("blockdim %d, %d, %d, griddim %d, %d, %d outputsize %d\n", block.x, block.y, block.z, grid.x, grid.y, grid.z, output_size); //if (output.numel() == 0) { // THCudaCheck(cudaGetLastError()); // return output; //} //niu // printf("blockdim %d, %d, %d, griddim %d, %d, %d outputsize %d, channels %d, width %d, height %d, padding %d, stride %d, bias %s, kernel_size %d\n", block.x, block.y, block.z, grid.x, grid.y, grid.z, batch_size, channels, width, height, padding, stride, bias.size(0), kernel_size); AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2d_forward", [&] { DepthWiseConv2dFForward<scalar_t><<<grid, block, 0, stream>>>( input.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, padding, height, width, kernel_size, out_height, out_width, output_size, true, output.data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); } else { auto dimy = THCCeilDiv((long)(height > out_height ? height : out_height), 2L); auto blocks_x = 1; auto blocks_y = 1; dim3 grid(blocks_x, blocks_y, THCCeilDiv((long)channels*batch_size, 64L)); dim3 block(width > out_width ? 
width : out_width, dimy, 8); // printf("Small blockdim %d, %d, %d, griddim %d, %d, %d outputsize %d, channels %d, width %d, height %d, padding %d, stride %d, bias %s, kernel_size %d\n", block.x, block.y, block.z, grid.x, grid.y, grid.z, batch_size, channels, width, height, padding, stride, bias.size(0), kernel_size); if (kernel_size == 3) { AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2dSmall_forward", [&] { DepthWiseConv2dSmallFForward<scalar_t, 3><<<grid, block, 0, stream>>>( input.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, padding, height, width, kernel_size, out_height, out_width, batch_size, true, output.data<scalar_t>()); }); } else if (kernel_size == 5) { AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2dSmall_forward", [&] { DepthWiseConv2dSmallFForward<scalar_t, 5><<<grid, block, 0, stream>>>( input.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, padding, height, width, kernel_size, out_height, out_width, batch_size, true, output.data<scalar_t>()); }); } else { AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2dSmall_forward", [&] { DepthWiseConv2dSmallFForward<scalar_t, 0><<<grid, block, 0, stream>>>( input.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, padding, height, width, kernel_size, out_height, out_width, batch_size, true, output.data<scalar_t>()); }); } THCudaCheck(cudaGetLastError()); } return output; } std::vector<at::Tensor> DepthWiseConv2d_backward_cuda(const at::Tensor& grad, const at::Tensor& input, const at::Tensor& weight, const at::Tensor& bias, const int stride, const int padding, const int dilation, const int groups) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); auto batch_size = input.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto kernel_size = weight.size(2); auto out_height = (height - kernel_size + 1 + padding * 2) / stride; auto out_width = (width - kernel_size + 1 + padding * 2) / stride; AT_ASSERTM(weight.size(0) == channels, "Weight output channel must be equal to Input channel"); // To deal with input grad computation. 
auto grad_input = at::empty({batch_size, channels, height, width}, grad.options()); auto grad_weight = at::empty({channels, 1, kernel_size, kernel_size}, grad.options()); auto grad_bias = at::empty({bias.size(0)}, grad.options()); auto blockdim = 32; auto bwd_padding = kernel_size - 1 - padding; auto bwd_s = 1; std::cout << out_width << "x" << out_height << " Grad " << grad.size(2) << "x" << grad.size(3) << std::endl; std::cout << grad.size(3) - kernel_size + 1 + bwd_padding * 2 << " bwd " << bwd_padding << std::endl; AT_ASSERTM(width == (grad.size(3) - kernel_size + 1 + bwd_padding * 2), "grad_input computed size should be equal to input size") if (width < kernel_size && width + kernel_size - 1 < 32) { blockdim = kernel_size; } else if (width + kernel_size - 1 < 32) { blockdim = width + kernel_size - 1; } auto blocks_x = THCCeilDiv((long)width, blockdim-kernel_size+1L); auto blocks_y = THCCeilDiv((long)height, blockdim-kernel_size+1L); auto output_size = batch_size * channels; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto znum = output_size; if (znum > 2048) { znum = std::max((2048 / channels) * channels, channels); } if (out_width + 2*padding > 16 || out_height + 2 * padding> 16) { dim3 grid(blocks_x, blocks_y, znum); dim3 block(blockdim, blockdim); AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2d_forward", [&] { DepthWiseConv2dFForward<scalar_t><<<grid, block, 0, stream>>>( grad.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, bwd_padding, out_height, out_width, kernel_size, height, width, output_size, false, grad_input.data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); } else { auto dimy = THCCeilDiv((long)(out_height > height ? out_height : height), 2L); auto blocks_x = 1; auto blocks_y = 1; dim3 grid(blocks_x, blocks_y, THCCeilDiv((long)channels*batch_size, 64L)); dim3 block(out_width > width ? 
out_width : width, dimy, 8); if (kernel_size == 3) { AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2dSmall_forward", [&] { DepthWiseConv2dSmallFForward<scalar_t, 3><<<grid, block, 0, stream>>>( grad.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, bwd_padding, out_height, out_width, kernel_size, height, width, batch_size, false, grad_input.data<scalar_t>()); }); std::cout << "3 small" << std::endl; } else if (kernel_size == 5) { AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2dSmall_forward", [&] { DepthWiseConv2dSmallFForward<scalar_t, 5><<<grid, block, 0, stream>>>( grad.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, bwd_padding, out_height, out_width, kernel_size, height, width, batch_size, false, grad_input.data<scalar_t>()); }); std::cout << "5 small" << std::endl; } else { AT_DISPATCH_FLOATING_TYPES(input.type(), "DepthWiseConv2dSmall_forward", [&] { DepthWiseConv2dSmallFForward<scalar_t, 0><<<grid, block, 0, stream>>>( grad.contiguous().data<scalar_t>(), weight.contiguous().data<scalar_t>(), bias.contiguous().data<scalar_t>(), channels, bwd_padding, out_height, out_width, kernel_size, height, width, batch_size, false, grad_input.data<scalar_t>()); }); std::cout << "Common small" << std::endl; //printf("<%d, %d, %d>\nGrid <%d, %d, %d>\nshape %d, %d, %d, %d\n", block.x, block.y, block.z, grid.x, grid.y, grid.z, width, height, out_width, out_height); } THCudaCheck(cudaGetLastError()); } //std::cout << "before return" << std::endl << out_width << std::endl << padding << std::endl << out_height << std::endl << width << std::endl; return std::vector<at::Tensor> {grad_input, grad_weight, grad_bias}; }
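Editor's sketch (not part of either file in this pair): the backward-input pass above reuses the forward kernels on the gradient, reading the filter in reverse (the `forward == false` branch) with padding `kernel_size - 1 - padding`, so a naive stride-1 reference convolution is enough to check both directions on small tensors. All names and shapes below are the editor's assumptions, not code from the repository.

#include <vector>

// Naive stride-1 depthwise convolution, used only as a correctness reference.
// Layouts follow the kernels above: input [N, C, H, W], weight [C, 1, K, K],
// output [N, C, H - K + 1 + 2*P, W - K + 1 + 2*P].
static void depthwise_conv2d_ref(const std::vector<float>& in,
                                 const std::vector<float>& w,
                                 const std::vector<float>& bias,
                                 int N, int C, int H, int W, int K, int P,
                                 std::vector<float>& out) {
  const int OH = H - K + 1 + 2 * P;
  const int OW = W - K + 1 + 2 * P;
  out.assign((size_t)N * C * OH * OW, 0.f);
  for (int n = 0; n < N; ++n)
    for (int c = 0; c < C; ++c)
      for (int oy = 0; oy < OH; ++oy)
        for (int ox = 0; ox < OW; ++ox) {
          float sum = bias.empty() ? 0.f : bias[c];
          for (int ky = 0; ky < K; ++ky)
            for (int kx = 0; kx < K; ++kx) {
              const int iy = oy + ky - P;   // input row sampled by this tap
              const int ix = ox + kx - P;   // input column sampled by this tap
              if (iy >= 0 && iy < H && ix >= 0 && ix < W)
                sum += in[((n * C + c) * H + iy) * W + ix] *
                       w[(c * K + ky) * K + kx];
            }
          out[((n * C + c) * OH + oy) * OW + ox] = sum;
        }
}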
c076bd1be83fcbef930856c48dc5f797909e9ae0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Project: Mask R-CNN // File: ROIAlignLayer // Adopted from roi_pooling_layer.cu (written by Ross Grischik) // Author: Jasjeet Dhaliwal // ------------------------------------------------------------------ #include <cfloat> #include <iostream> #include <string> #include <utility> #include <vector> #include <algorithm> #include <stdlib.h> #include "caffe/fast_rcnn_layers.hpp" using std::max; using std::min; using std::floor; using std::ceil; using std::fabs; using std::cout; namespace caffe { template <typename Dtype> __global__ void ROIAlignForward(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_idx, Dtype* argmax_mult) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int argmax_index = index * 4; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; Dtype roi_start_w = bottom_rois[1] * spatial_scale; Dtype roi_start_h = bottom_rois[2] * spatial_scale; Dtype roi_end_w = bottom_rois[3] * spatial_scale; Dtype roi_end_h = bottom_rois[4] * spatial_scale; //Util Values Dtype zero = 0.0, one = 1.0; // Force malformed ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w + 1.0, one); Dtype roi_height = max(roi_end_h - roi_start_h + 1.0, one); Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); Dtype hstart = static_cast<Dtype>(ph) * bin_size_h; Dtype wstart = static_cast<Dtype>(pw) * bin_size_w; Dtype hend = static_cast<Dtype>(ph + 1) * bin_size_h; Dtype wend = static_cast<Dtype>(pw + 1) * bin_size_w; // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, zero), static_cast<Dtype>(height) ); hend = min(max(hend + roi_start_h, zero), static_cast<Dtype>(height)); wstart = min(max(wstart + roi_start_w, zero), static_cast<Dtype>(width)); wend = min(max(wend + roi_start_w, zero), static_cast<Dtype>(width)); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxvalue = is_empty ? 0 : -FLT_MAX; int maxidx[4]; Dtype maxmult[4]; //int bottom_offset = (roi_batch_ind * channels + c) * height * width ; //bottom_data += (roi_batch_ind * channels + c) * height * width; /* Normalization function - normalizes values between -1 and 1. 
a = -1, b = 1 y = f(x) = [[(b - a) (x - roi_start_h)] / [roi_end_h - roi_start_h]] + a x = f^{-1}(y) = [[(f(x) - a)(roi_end_h - roi_end_h)] / (b - a)] + roi_start_h Normalized coordinates of 4 regularly sampled points in the ROI: sn_1 = (-0.5,-0.5) sn_2 = (-0.5,0.5) sn_3 = (0.5,-0.5) sn_4 = (0.5,0.5) // Debugging purposes Dtype x_pos = (((0.5 + 1)*(roi_end_w - roi_start_w))/2.0) + roi_start_w; Dtype x_neg = (((-0.5 + 1)*(roi_end_w - roi_start_w))/2.0) + roi_start_w; Dtype y_pos = (((0.5 + 1)*(roi_end_h - roi_start_h))/2.0) + roi_start_h; Dtype y_neg = (((-0.5 + 1)*(roi_end_h - roi_start_h))/2.0) + roi_start_h; Dtype samples[2] = {x_neg, y_neg, x_neg, y_pos, x_pos, y_neg, x_pos, y_pos}; */ Dtype samples_n[8] = {-0.5, -0.5, -0.5, 0.5, 0.5, -0.5, 0.5, 0.5}; //Holds interpolated values for each sample point Dtype bisampled[4]; int counter = 0; Dtype x_smp_n = -2.0, y_smp_n = -2.0, h_idx_n = -2.0, w_idx_n = -2.0; //Bilinearly Interpolate 4 sampled values for (int smp = 0; smp < sizeof(samples_n)/sizeof(*samples_n) ; smp+=2) { x_smp_n = samples_n[smp]; y_smp_n = samples_n[smp+1]; bisampled[smp/2] = 0.0; int b_index[4] = {-1, -1 , -1, -1}; // -1,-1,-1,-1}; //int b_index_curr[4] = {-1,-1,-1,-1}; Dtype multiplier[4] = {Dtype(-FLT_MAX), Dtype(-FLT_MAX), Dtype(-FLT_MAX), Dtype(-FLT_MAX)}; //Dtype(-FLT_MAX), Dtype(-FLT_MAX), Dtype(-FLT_MAX), Dtype(-FLT_MAX)}; counter = 0; //ceil(hstart) //floor(hend) for (int h_idx = ceil(hstart); h_idx <= floor(hend) && h_idx <= height && h_idx >= 0 ; ++h_idx) { for (int w_idx =ceil(wstart); w_idx <= floor(wend) && w_idx <= width && w_idx >= 0; ++w_idx) { if (counter < 4) { b_index[counter] = ((((roi_batch_ind * channels) + c) * height) + h_idx) * width + w_idx; // b_index_curr[counter]= h_idx*width + w_idx; //Normalize width and height to lie between -1 and 1 h_idx_n = static_cast<Dtype>( (static_cast<Dtype>(2)*(static_cast<Dtype>(h_idx) - roi_start_h) / (roi_end_h - roi_start_h)) - 1); w_idx_n = static_cast<Dtype>((static_cast<Dtype>(2)*(static_cast<Dtype>(w_idx) - roi_start_w) / (roi_end_w - roi_start_w)) - 1); h_idx_n = min(max(h_idx_n, static_cast<Dtype>(-1.0)),one); w_idx_n = min(max(w_idx_n, static_cast<Dtype>(-1.0)),one); multiplier[counter]= max(zero ,static_cast<Dtype>(1 - fabs(x_smp_n - w_idx_n))) * max(zero,static_cast<Dtype>(1 - fabs(y_smp_n - h_idx_n))); //bisampled[smp/2] += multiplier[counter]; bisampled[smp/2] += bottom_data[ b_index[counter]] * multiplier[counter]; ++counter; } else { goto stop; } } //w }//h stop: if (bisampled[smp/2] > maxvalue) { maxvalue = bisampled[smp/2]; //Using two loops to comply with c++ convention for (int i=0; i<4;++i) { maxidx[i] = b_index[i]; maxmult[i] = multiplier[i]; } } } //smp //Store value in the top blob top_data[index] = maxvalue; for (int i = 0; i<4; ++i, ++argmax_index) { argmax_idx[argmax_index] = maxidx[i]; argmax_mult[argmax_index] = maxmult[i]; } } } template <typename Dtype> void ROIAlignLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_idx = max_pts_.mutable_gpu_data(); Dtype* argmax_mult = max_mult_.mutable_gpu_data(); int count = top[0]->count(); LOG(INFO) << "Doing forward now"; // NOLINT_NEXT_LINE(whitespace/operators) //Change CAFFE_CUDA_NUM_THREADS to 64 hipLaunchKernelGGL(( ROIAlignForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(32), 0, 0, count, bottom_data, spatial_scale_, channels_, 
height_, width_, pooled_height_, pooled_width_, bottom_rois, top_data, argmax_idx, argmax_mult); LOG(INFO) << "Done forward "; CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void ROIAlignBackward(const int nthreads, const Dtype* top_diff, const int* argmax_idx, const Dtype* argmax_mult, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0.0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { //const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; //int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n // if (n != roi_batch_ind) { // continue; // } const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } int roi_start_w = ceil(offset_bottom_rois[1] * spatial_scale); int roi_start_h = ceil(offset_bottom_rois[2] * spatial_scale); int roi_end_w = floor(offset_bottom_rois[3] * spatial_scale); int roi_end_h = floor(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; int argmax_offset = offset * 4; const Dtype* offset_top_diff = top_diff + offset; const int* offset_argmax_idx = argmax_idx + argmax_offset; const Dtype* offset_argmax_mult = argmax_mult + argmax_offset; // Util Vals Dtype multiplier = 0.0; for (int ph = 0; ph < pooled_height; ++ph) { for (int pw = 0; pw < pooled_width; ++pw) { for (int k = 0; k < 4; ++k) { if (offset_argmax_idx[((ph * pooled_width + pw) * 4) + k] == index ) { multiplier = offset_argmax_mult[( (ph * pooled_width + pw) * 4) + k]; gradient += offset_top_diff[ph * pooled_width + pw] * multiplier; } } }//pw }//ph }//rois bottom_diff[index] = gradient; } } template <typename Dtype> void ROIAlignLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); const int* argmax_idx = max_pts_.gpu_data(); const Dtype* argmax_mult = max_mult_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) // CAFFE_CUDA_NUM_THREADS replaced with 64 LOG(INFO) << "Doing backward "; hipLaunchKernelGGL(( ROIAlignBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(16), 0, 0, count, top_diff, argmax_idx, argmax_mult, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); LOG(INFO) << "Done backward"; CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ROIAlignLayer); } // namespace caffe
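For readers diffing this .hip column against the .cu column that follows: apart from the `hip*` runtime names, the systematic change hipify makes in this file is the kernel-launch spelling. A minimal sketch with a placeholder kernel (not taken from the layer) showing the two forms side by side:

// CUDA spelling, as in the .cu version below:
//     ROIAlignForward<Dtype><<<CAFFE_GET_BLOCKS(count), 32>>>(count, bottom_data, ...);
// HIP spelling emitted by hipify, as in the .hip version above:
//     hipLaunchKernelGGL((ROIAlignForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(32),
//                        0 /* shared memory bytes */, 0 /* stream */,
//                        count, bottom_data, ...);
__global__ void scale(float* p, int n, float s) {   // placeholder kernel for this sketch
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) p[i] *= s;
}
// scale<<<grid, block, 0, stream>>>(p, n, 2.f);                     // CUDA launch
// hipLaunchKernelGGL((scale), grid, block, 0, stream, p, n, 2.f);   // equivalent HIP launch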
c076bd1be83fcbef930856c48dc5f797909e9ae0.cu
// ------------------------------------------------------------------ // Project: Mask R-CNN // File: ROIAlignLayer // Adopted from roi_pooling_layer.cu (written by Ross Grischik) // Author: Jasjeet Dhaliwal // ------------------------------------------------------------------ #include <cfloat> #include <iostream> #include <string> #include <utility> #include <vector> #include <algorithm> #include <stdlib.h> #include "caffe/fast_rcnn_layers.hpp" using std::max; using std::min; using std::floor; using std::ceil; using std::fabs; using std::cout; namespace caffe { template <typename Dtype> __global__ void ROIAlignForward(const int nthreads, const Dtype* bottom_data, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, int* argmax_idx, Dtype* argmax_mult) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int argmax_index = index * 4; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; Dtype roi_start_w = bottom_rois[1] * spatial_scale; Dtype roi_start_h = bottom_rois[2] * spatial_scale; Dtype roi_end_w = bottom_rois[3] * spatial_scale; Dtype roi_end_h = bottom_rois[4] * spatial_scale; //Util Values Dtype zero = 0.0, one = 1.0; // Force malformed ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w + 1.0, one); Dtype roi_height = max(roi_end_h - roi_start_h + 1.0, one); Dtype bin_size_h = roi_height / static_cast<Dtype>(pooled_height); Dtype bin_size_w = roi_width / static_cast<Dtype>(pooled_width); Dtype hstart = static_cast<Dtype>(ph) * bin_size_h; Dtype wstart = static_cast<Dtype>(pw) * bin_size_w; Dtype hend = static_cast<Dtype>(ph + 1) * bin_size_h; Dtype wend = static_cast<Dtype>(pw + 1) * bin_size_w; // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, zero), static_cast<Dtype>(height) ); hend = min(max(hend + roi_start_h, zero), static_cast<Dtype>(height)); wstart = min(max(wstart + roi_start_w, zero), static_cast<Dtype>(width)); wend = min(max(wend + roi_start_w, zero), static_cast<Dtype>(width)); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxvalue = is_empty ? 0 : -FLT_MAX; int maxidx[4]; Dtype maxmult[4]; //int bottom_offset = (roi_batch_ind * channels + c) * height * width ; //bottom_data += (roi_batch_ind * channels + c) * height * width; /* Normalization function - normalizes values between -1 and 1. 
a = -1, b = 1 y = f(x) = [[(b - a) (x - roi_start_h)] / [roi_end_h - roi_start_h]] + a x = f^{-1}(y) = [[(f(x) - a)(roi_end_h - roi_end_h)] / (b - a)] + roi_start_h Normalized coordinates of 4 regularly sampled points in the ROI: sn_1 = (-0.5,-0.5) sn_2 = (-0.5,0.5) sn_3 = (0.5,-0.5) sn_4 = (0.5,0.5) // Debugging purposes Dtype x_pos = (((0.5 + 1)*(roi_end_w - roi_start_w))/2.0) + roi_start_w; Dtype x_neg = (((-0.5 + 1)*(roi_end_w - roi_start_w))/2.0) + roi_start_w; Dtype y_pos = (((0.5 + 1)*(roi_end_h - roi_start_h))/2.0) + roi_start_h; Dtype y_neg = (((-0.5 + 1)*(roi_end_h - roi_start_h))/2.0) + roi_start_h; Dtype samples[2] = {x_neg, y_neg, x_neg, y_pos, x_pos, y_neg, x_pos, y_pos}; */ Dtype samples_n[8] = {-0.5, -0.5, -0.5, 0.5, 0.5, -0.5, 0.5, 0.5}; //Holds interpolated values for each sample point Dtype bisampled[4]; int counter = 0; Dtype x_smp_n = -2.0, y_smp_n = -2.0, h_idx_n = -2.0, w_idx_n = -2.0; //Bilinearly Interpolate 4 sampled values for (int smp = 0; smp < sizeof(samples_n)/sizeof(*samples_n) ; smp+=2) { x_smp_n = samples_n[smp]; y_smp_n = samples_n[smp+1]; bisampled[smp/2] = 0.0; int b_index[4] = {-1, -1 , -1, -1}; // -1,-1,-1,-1}; //int b_index_curr[4] = {-1,-1,-1,-1}; Dtype multiplier[4] = {Dtype(-FLT_MAX), Dtype(-FLT_MAX), Dtype(-FLT_MAX), Dtype(-FLT_MAX)}; //Dtype(-FLT_MAX), Dtype(-FLT_MAX), Dtype(-FLT_MAX), Dtype(-FLT_MAX)}; counter = 0; //ceil(hstart) //floor(hend) for (int h_idx = ceil(hstart); h_idx <= floor(hend) && h_idx <= height && h_idx >= 0 ; ++h_idx) { for (int w_idx =ceil(wstart); w_idx <= floor(wend) && w_idx <= width && w_idx >= 0; ++w_idx) { if (counter < 4) { b_index[counter] = ((((roi_batch_ind * channels) + c) * height) + h_idx) * width + w_idx; // b_index_curr[counter]= h_idx*width + w_idx; //Normalize width and height to lie between -1 and 1 h_idx_n = static_cast<Dtype>( (static_cast<Dtype>(2)*(static_cast<Dtype>(h_idx) - roi_start_h) / (roi_end_h - roi_start_h)) - 1); w_idx_n = static_cast<Dtype>((static_cast<Dtype>(2)*(static_cast<Dtype>(w_idx) - roi_start_w) / (roi_end_w - roi_start_w)) - 1); h_idx_n = min(max(h_idx_n, static_cast<Dtype>(-1.0)),one); w_idx_n = min(max(w_idx_n, static_cast<Dtype>(-1.0)),one); multiplier[counter]= max(zero ,static_cast<Dtype>(1 - fabs(x_smp_n - w_idx_n))) * max(zero,static_cast<Dtype>(1 - fabs(y_smp_n - h_idx_n))); //bisampled[smp/2] += multiplier[counter]; bisampled[smp/2] += bottom_data[ b_index[counter]] * multiplier[counter]; ++counter; } else { goto stop; } } //w }//h stop: if (bisampled[smp/2] > maxvalue) { maxvalue = bisampled[smp/2]; //Using two loops to comply with c++ convention for (int i=0; i<4;++i) { maxidx[i] = b_index[i]; maxmult[i] = multiplier[i]; } } } //smp //Store value in the top blob top_data[index] = maxvalue; for (int i = 0; i<4; ++i, ++argmax_index) { argmax_idx[argmax_index] = maxidx[i]; argmax_mult[argmax_index] = maxmult[i]; } } } template <typename Dtype> void ROIAlignLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_rois = bottom[1]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int* argmax_idx = max_pts_.mutable_gpu_data(); Dtype* argmax_mult = max_mult_.mutable_gpu_data(); int count = top[0]->count(); LOG(INFO) << "Doing forward now"; // NOLINT_NEXT_LINE(whitespace/operators) //Change CAFFE_CUDA_NUM_THREADS to 64 ROIAlignForward<Dtype><<<CAFFE_GET_BLOCKS(count), 32>>>( count, bottom_data, spatial_scale_, channels_, height_, width_, pooled_height_, 
pooled_width_, bottom_rois, top_data, argmax_idx, argmax_mult); LOG(INFO) << "Done forward "; CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void ROIAlignBackward(const int nthreads, const Dtype* top_diff, const int* argmax_idx, const Dtype* argmax_mult, const int num_rois, const Dtype spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { CUDA_KERNEL_LOOP(index, nthreads) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0.0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { //const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; //int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n // if (n != roi_batch_ind) { // continue; // } const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } int roi_start_w = ceil(offset_bottom_rois[1] * spatial_scale); int roi_start_h = ceil(offset_bottom_rois[2] * spatial_scale); int roi_end_w = floor(offset_bottom_rois[3] * spatial_scale); int roi_end_h = floor(offset_bottom_rois[4] * spatial_scale); // Skip if ROI doesn't include (h, w) const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; int argmax_offset = offset * 4; const Dtype* offset_top_diff = top_diff + offset; const int* offset_argmax_idx = argmax_idx + argmax_offset; const Dtype* offset_argmax_mult = argmax_mult + argmax_offset; // Util Vals Dtype multiplier = 0.0; for (int ph = 0; ph < pooled_height; ++ph) { for (int pw = 0; pw < pooled_width; ++pw) { for (int k = 0; k < 4; ++k) { if (offset_argmax_idx[((ph * pooled_width + pw) * 4) + k] == index ) { multiplier = offset_argmax_mult[( (ph * pooled_width + pw) * 4) + k]; gradient += offset_top_diff[ph * pooled_width + pw] * multiplier; } } }//pw }//ph }//rois bottom_diff[index] = gradient; } } template <typename Dtype> void ROIAlignLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* bottom_rois = bottom[1]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); const int* argmax_idx = max_pts_.gpu_data(); const Dtype* argmax_mult = max_mult_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) // CAFFE_CUDA_NUM_THREADS replaced with 64 LOG(INFO) << "Doing backward "; ROIAlignBackward<Dtype><<<CAFFE_GET_BLOCKS(count), 16>>>( count, top_diff, argmax_idx, argmax_mult, top[0]->num(), spatial_scale_, channels_, height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois); LOG(INFO) << "Done backward"; CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(ROIAlignLayer); } // namespace caffe
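The sampling weight in ROIAlignForward is the product of two triangular (hat) kernels over the normalized coordinates, multiplier = max(0, 1 - |x_s - x_i|) * max(0, 1 - |y_s - y_i|). A small host-side sketch with an invented numeric example (names are the editor's, not from the layer):

#include <cmath>
#include <cstdio>

// Bilinear hat weight used by the forward kernel: contribution of a grid
// point (xi, yi) to a sample point (xs, ys), all in normalized [-1, 1] coords.
static float bilinear_weight(float xs, float ys, float xi, float yi) {
  const float wx = std::fmax(0.f, 1.f - std::fabs(xs - xi));
  const float wy = std::fmax(0.f, 1.f - std::fabs(ys - yi));
  return wx * wy;
}

int main() {
  // Sample point (0.5, 0.5) against a grid point at (0.25, 0.75):
  // weight = (1 - 0.25) * (1 - 0.25) = 0.5625.
  std::printf("%f\n", bilinear_weight(0.5f, 0.5f, 0.25f, 0.75f));
  return 0;
}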
901e34769b6ba70ba555ca5e9c791b1dcacf0d1a.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "column_filter.h" namespace filter { template void linearColumn<float4, short4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream); } #endif /* CUDA_DISABLER */
901e34769b6ba70ba555ca5e9c791b1dcacf0d1a.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "column_filter.h" namespace filter { template void linearColumn<float4, short4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream); } #endif /* CUDA_DISABLER */
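This pair differs only in the stream type of the instantiated template (hipStream_t vs cudaStream_t); the rest of hipify's work in the surrounding files is a one-to-one rename of runtime identifiers. The list below only restates mappings that actually appear in this section:

// CUDA name                          ->  HIP name (as seen in the paired files above/below)
// #include "cuda_runtime.h"          ->  #include "hip/hip_runtime.h"
// cudaStream_t                       ->  hipStream_t
// cudaMalloc                         ->  hipMalloc
// cudaMemcpy, cudaMemcpyHostToDevice ->  hipMemcpy, hipMemcpyHostToDevice
// cudaDeviceSynchronize              ->  hipDeviceSynchronize
// cudaGetLastError                   ->  hipGetLastError
// kernel<<<grid, block, shmem, s>>>(args)
//                                    ->  hipLaunchKernelGGL((kernel), grid, block, shmem, s, args)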
38e8648869fef8ec23e95e86471fd0ec88adc2d9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "device.hpp" namespace pcl { namespace device { __device__ __forceinline__ float getMinTime (const float3& volume_max, const float3& origin, const float3& dir) { float txmin = ( (dir.x > 0 ? 0.f : volume_max.x) - origin.x) / dir.x; float tymin = ( (dir.y > 0 ? 0.f : volume_max.y) - origin.y) / dir.y; float tzmin = ( (dir.z > 0 ? 0.f : volume_max.z) - origin.z) / dir.z; return fmax ( fmax (txmin, tymin), tzmin); } __device__ __forceinline__ float getMaxTime (const float3& volume_max, const float3& origin, const float3& dir) { float txmax = ( (dir.x > 0 ? volume_max.x : 0.f) - origin.x) / dir.x; float tymax = ( (dir.y > 0 ? volume_max.y : 0.f) - origin.y) / dir.y; float tzmax = ( (dir.z > 0 ? 
volume_max.z : 0.f) - origin.z) / dir.z; return fmin (fmin (txmax, tymax), tzmax); } struct RayCaster { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8 }; Mat33 Rcurr; float3 tcurr; float time_step; float3 volume_size; float3 cell_size; int cols, rows; PtrStep<short2> volume; Intr intr; mutable PtrStep<float> nmap; mutable PtrStep<float> vmap; __device__ __forceinline__ float3 get_ray_next (int x, int y) const { float3 ray_next; ray_next.x = (x - intr.cx) / intr.fx; ray_next.y = (y - intr.cy) / intr.fy; ray_next.z = 1; return ray_next; } __device__ __forceinline__ bool checkInds (const int3& g) const { return (g.x >= 0 && g.y >= 0 && g.z >= 0 && g.x < VOLUME_X && g.y < VOLUME_Y && g.z < VOLUME_Z); } __device__ __forceinline__ float readTsdf (int x, int y, int z) const { return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]); } __device__ __forceinline__ int3 getVoxel (float3 point) const { int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity int vy = __float2int_rd (point.y / cell_size.y); int vz = __float2int_rd (point.z / cell_size.z); return make_int3 (vx, vy, vz); } __device__ __forceinline__ float interpolateTrilineary (const float3& origin, const float3& dir, float time) const { return interpolateTrilineary (origin + dir * time); } __device__ __forceinline__ float interpolateTrilineary (const float3& point) const { int3 g = getVoxel (point); if (g.x <= 0 || g.x >= VOLUME_X - 1) return std::numeric_limits<float>::quiet_NaN (); if (g.y <= 0 || g.y >= VOLUME_Y - 1) return std::numeric_limits<float>::quiet_NaN (); if (g.z <= 0 || g.z >= VOLUME_Z - 1) return std::numeric_limits<float>::quiet_NaN (); float vx = (g.x + 0.5f) * cell_size.x; float vy = (g.y + 0.5f) * cell_size.y; float vz = (g.z + 0.5f) * cell_size.z; g.x = (point.x < vx) ? (g.x - 1) : g.x; g.y = (point.y < vy) ? (g.y - 1) : g.y; g.z = (point.z < vz) ? (g.z - 1) : g.z; float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x; float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y; float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z; float res = readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - a) * (1 - b) * (1 - c) + readTsdf (g.x + 0, g.y + 0, g.z + 1) * (1 - a) * (1 - b) * c + readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - a) * b * (1 - c) + readTsdf (g.x + 0, g.y + 1, g.z + 1) * (1 - a) * b * c + readTsdf (g.x + 1, g.y + 0, g.z + 0) * a * (1 - b) * (1 - c) + readTsdf (g.x + 1, g.y + 0, g.z + 1) * a * (1 - b) * c + readTsdf (g.x + 1, g.y + 1, g.z + 0) * a * b * (1 - c) + readTsdf (g.x + 1, g.y + 1, g.z + 1) * a * b * c; return res; } __device__ __forceinline__ void operator () () const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; if (x >= cols || y >= rows) return; vmap.ptr (y)[x] = std::numeric_limits<float>::quiet_NaN (); nmap.ptr (y)[x] = std::numeric_limits<float>::quiet_NaN (); float3 ray_start = tcurr; float3 ray_next = Rcurr * get_ray_next (x, y) + tcurr; float3 ray_dir = normalized (ray_next - ray_start); //ensure that it isn't a degenerate case ray_dir.x = (ray_dir.x == 0.f) ? 1e-15 : ray_dir.x; ray_dir.y = (ray_dir.y == 0.f) ? 1e-15 : ray_dir.y; ray_dir.z = (ray_dir.z == 0.f) ? 
1e-15 : ray_dir.z; // computer time when entry and exit volume float time_start_volume = getMinTime (volume_size, ray_start, ray_dir); float time_exit_volume = getMaxTime (volume_size, ray_start, ray_dir); const float min_dist = 0.f; //in meters time_start_volume = fmax (time_start_volume, min_dist); if (time_start_volume >= time_exit_volume) return; float time_curr = time_start_volume; int3 g = getVoxel (ray_start + ray_dir * time_curr); g.x = max (0, min (g.x, VOLUME_X - 1)); g.y = max (0, min (g.y, VOLUME_Y - 1)); g.z = max (0, min (g.z, VOLUME_Z - 1)); float tsdf = readTsdf (g.x, g.y, g.z); //infinite loop guard const float max_time = 3 * (volume_size.x + volume_size.y + volume_size.z); for (; time_curr < max_time; time_curr += time_step) { float tsdf_prev = tsdf; int3 g = getVoxel ( ray_start + ray_dir * (time_curr + time_step) ); if (!checkInds (g)) break; tsdf = readTsdf (g.x, g.y, g.z); if (tsdf_prev < 0.f && tsdf > 0.f) break; if (tsdf_prev > 0.f && tsdf < 0.f) //zero crossing { float Ftdt = interpolateTrilineary (ray_start, ray_dir, time_curr + time_step); if (isnan (Ftdt)) break; float Ft = interpolateTrilineary (ray_start, ray_dir, time_curr); if (isnan (Ft)) break; //float Ts = time_curr - time_step * Ft/(Ftdt - Ft); float Ts = time_curr - time_step * Ft / (Ftdt - Ft); float3 vetex_found = ray_start + ray_dir * Ts; vmap.ptr (y )[x] = vetex_found.x; vmap.ptr (y + rows)[x] = vetex_found.y; vmap.ptr (y + 2 * rows)[x] = vetex_found.z; int3 g = getVoxel ( ray_start + ray_dir * time_curr ); if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2) { float3 t; float3 n; t = vetex_found; t.x += cell_size.x; float Fx1 = interpolateTrilineary (t); t = vetex_found; t.x -= cell_size.x; float Fx2 = interpolateTrilineary (t); n.x = (Fx1 - Fx2); t = vetex_found; t.y += cell_size.y; float Fy1 = interpolateTrilineary (t); t = vetex_found; t.y -= cell_size.y; float Fy2 = interpolateTrilineary (t); n.y = (Fy1 - Fy2); t = vetex_found; t.z += cell_size.z; float Fz1 = interpolateTrilineary (t); t = vetex_found; t.z -= cell_size.z; float Fz2 = interpolateTrilineary (t); n.z = (Fz1 - Fz2); n = normalized (n); nmap.ptr (y )[x] = n.x; nmap.ptr (y + rows)[x] = n.y; nmap.ptr (y + 2 * rows)[x] = n.z; } break; } } } }; __global__ void rayCastKernel (const RayCaster rc) { rc (); } } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void pcl::device::raycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr, float tranc_dist, const float3& volume_size, const PtrStep<short2>& volume, MapArr& vmap, MapArr& nmap) { RayCaster rc; rc.Rcurr = Rcurr; rc.tcurr = tcurr; rc.time_step = tranc_dist * 0.8f; rc.volume_size = volume_size; rc.cell_size.x = volume_size.x / VOLUME_X; rc.cell_size.y = volume_size.y / VOLUME_Y; rc.cell_size.z = volume_size.z / VOLUME_Z; rc.cols = vmap.cols (); rc.rows = vmap.rows () / 3; rc.intr = intr; rc.volume = volume; rc.vmap = vmap; rc.nmap = nmap; dim3 block (RayCaster::CTA_SIZE_X, RayCaster::CTA_SIZE_Y); dim3 grid (divUp (rc.cols, block.x), divUp (rc.rows, block.y)); hipLaunchKernelGGL(( rayCastKernel), dim3(grid), dim3(block), 0, 0, rc); cudaSafeCall (hipGetLastError ()); //cudaSafeCall(hipDeviceSynchronize()); }
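When consecutive TSDF samples change sign from positive to negative, the kernel above refines the hit with a linear zero crossing, Ts = time_curr - time_step * Ft / (Ftdt - Ft), before computing the vertex and the central-difference normal. A tiny numeric sketch (values invented for illustration):

#include <cstdio>

// Linear zero-crossing estimate between two TSDF samples along a ray, as used
// in RayCaster::operator(): F(t) = Ft at time t, F(t + dt) = Ftdt.
static float zero_crossing_time(float t, float dt, float Ft, float Ftdt) {
  return t - dt * Ft / (Ftdt - Ft);
}

int main() {
  // Example: Ft = +0.2 at t = 1.0, Ftdt = -0.6 at t = 1.1 (dt = 0.1).
  // The sign change lies a quarter of the way into the step, so Ts = 1.025.
  std::printf("%f\n", zero_crossing_time(1.0f, 0.1f, 0.2f, -0.6f));
  return 0;
}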
38e8648869fef8ec23e95e86471fd0ec88adc2d9.cu
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "device.hpp" namespace pcl { namespace device { __device__ __forceinline__ float getMinTime (const float3& volume_max, const float3& origin, const float3& dir) { float txmin = ( (dir.x > 0 ? 0.f : volume_max.x) - origin.x) / dir.x; float tymin = ( (dir.y > 0 ? 0.f : volume_max.y) - origin.y) / dir.y; float tzmin = ( (dir.z > 0 ? 0.f : volume_max.z) - origin.z) / dir.z; return fmax ( fmax (txmin, tymin), tzmin); } __device__ __forceinline__ float getMaxTime (const float3& volume_max, const float3& origin, const float3& dir) { float txmax = ( (dir.x > 0 ? volume_max.x : 0.f) - origin.x) / dir.x; float tymax = ( (dir.y > 0 ? volume_max.y : 0.f) - origin.y) / dir.y; float tzmax = ( (dir.z > 0 ? 
volume_max.z : 0.f) - origin.z) / dir.z; return fmin (fmin (txmax, tymax), tzmax); } struct RayCaster { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8 }; Mat33 Rcurr; float3 tcurr; float time_step; float3 volume_size; float3 cell_size; int cols, rows; PtrStep<short2> volume; Intr intr; mutable PtrStep<float> nmap; mutable PtrStep<float> vmap; __device__ __forceinline__ float3 get_ray_next (int x, int y) const { float3 ray_next; ray_next.x = (x - intr.cx) / intr.fx; ray_next.y = (y - intr.cy) / intr.fy; ray_next.z = 1; return ray_next; } __device__ __forceinline__ bool checkInds (const int3& g) const { return (g.x >= 0 && g.y >= 0 && g.z >= 0 && g.x < VOLUME_X && g.y < VOLUME_Y && g.z < VOLUME_Z); } __device__ __forceinline__ float readTsdf (int x, int y, int z) const { return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]); } __device__ __forceinline__ int3 getVoxel (float3 point) const { int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity int vy = __float2int_rd (point.y / cell_size.y); int vz = __float2int_rd (point.z / cell_size.z); return make_int3 (vx, vy, vz); } __device__ __forceinline__ float interpolateTrilineary (const float3& origin, const float3& dir, float time) const { return interpolateTrilineary (origin + dir * time); } __device__ __forceinline__ float interpolateTrilineary (const float3& point) const { int3 g = getVoxel (point); if (g.x <= 0 || g.x >= VOLUME_X - 1) return std::numeric_limits<float>::quiet_NaN (); if (g.y <= 0 || g.y >= VOLUME_Y - 1) return std::numeric_limits<float>::quiet_NaN (); if (g.z <= 0 || g.z >= VOLUME_Z - 1) return std::numeric_limits<float>::quiet_NaN (); float vx = (g.x + 0.5f) * cell_size.x; float vy = (g.y + 0.5f) * cell_size.y; float vz = (g.z + 0.5f) * cell_size.z; g.x = (point.x < vx) ? (g.x - 1) : g.x; g.y = (point.y < vy) ? (g.y - 1) : g.y; g.z = (point.z < vz) ? (g.z - 1) : g.z; float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x; float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y; float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z; float res = readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - a) * (1 - b) * (1 - c) + readTsdf (g.x + 0, g.y + 0, g.z + 1) * (1 - a) * (1 - b) * c + readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - a) * b * (1 - c) + readTsdf (g.x + 0, g.y + 1, g.z + 1) * (1 - a) * b * c + readTsdf (g.x + 1, g.y + 0, g.z + 0) * a * (1 - b) * (1 - c) + readTsdf (g.x + 1, g.y + 0, g.z + 1) * a * (1 - b) * c + readTsdf (g.x + 1, g.y + 1, g.z + 0) * a * b * (1 - c) + readTsdf (g.x + 1, g.y + 1, g.z + 1) * a * b * c; return res; } __device__ __forceinline__ void operator () () const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; if (x >= cols || y >= rows) return; vmap.ptr (y)[x] = std::numeric_limits<float>::quiet_NaN (); nmap.ptr (y)[x] = std::numeric_limits<float>::quiet_NaN (); float3 ray_start = tcurr; float3 ray_next = Rcurr * get_ray_next (x, y) + tcurr; float3 ray_dir = normalized (ray_next - ray_start); //ensure that it isn't a degenerate case ray_dir.x = (ray_dir.x == 0.f) ? 1e-15 : ray_dir.x; ray_dir.y = (ray_dir.y == 0.f) ? 1e-15 : ray_dir.y; ray_dir.z = (ray_dir.z == 0.f) ? 
1e-15 : ray_dir.z; // computer time when entry and exit volume float time_start_volume = getMinTime (volume_size, ray_start, ray_dir); float time_exit_volume = getMaxTime (volume_size, ray_start, ray_dir); const float min_dist = 0.f; //in meters time_start_volume = fmax (time_start_volume, min_dist); if (time_start_volume >= time_exit_volume) return; float time_curr = time_start_volume; int3 g = getVoxel (ray_start + ray_dir * time_curr); g.x = max (0, min (g.x, VOLUME_X - 1)); g.y = max (0, min (g.y, VOLUME_Y - 1)); g.z = max (0, min (g.z, VOLUME_Z - 1)); float tsdf = readTsdf (g.x, g.y, g.z); //infinite loop guard const float max_time = 3 * (volume_size.x + volume_size.y + volume_size.z); for (; time_curr < max_time; time_curr += time_step) { float tsdf_prev = tsdf; int3 g = getVoxel ( ray_start + ray_dir * (time_curr + time_step) ); if (!checkInds (g)) break; tsdf = readTsdf (g.x, g.y, g.z); if (tsdf_prev < 0.f && tsdf > 0.f) break; if (tsdf_prev > 0.f && tsdf < 0.f) //zero crossing { float Ftdt = interpolateTrilineary (ray_start, ray_dir, time_curr + time_step); if (isnan (Ftdt)) break; float Ft = interpolateTrilineary (ray_start, ray_dir, time_curr); if (isnan (Ft)) break; //float Ts = time_curr - time_step * Ft/(Ftdt - Ft); float Ts = time_curr - time_step * Ft / (Ftdt - Ft); float3 vetex_found = ray_start + ray_dir * Ts; vmap.ptr (y )[x] = vetex_found.x; vmap.ptr (y + rows)[x] = vetex_found.y; vmap.ptr (y + 2 * rows)[x] = vetex_found.z; int3 g = getVoxel ( ray_start + ray_dir * time_curr ); if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2) { float3 t; float3 n; t = vetex_found; t.x += cell_size.x; float Fx1 = interpolateTrilineary (t); t = vetex_found; t.x -= cell_size.x; float Fx2 = interpolateTrilineary (t); n.x = (Fx1 - Fx2); t = vetex_found; t.y += cell_size.y; float Fy1 = interpolateTrilineary (t); t = vetex_found; t.y -= cell_size.y; float Fy2 = interpolateTrilineary (t); n.y = (Fy1 - Fy2); t = vetex_found; t.z += cell_size.z; float Fz1 = interpolateTrilineary (t); t = vetex_found; t.z -= cell_size.z; float Fz2 = interpolateTrilineary (t); n.z = (Fz1 - Fz2); n = normalized (n); nmap.ptr (y )[x] = n.x; nmap.ptr (y + rows)[x] = n.y; nmap.ptr (y + 2 * rows)[x] = n.z; } break; } } } }; __global__ void rayCastKernel (const RayCaster rc) { rc (); } } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void pcl::device::raycast (const Intr& intr, const Mat33& Rcurr, const float3& tcurr, float tranc_dist, const float3& volume_size, const PtrStep<short2>& volume, MapArr& vmap, MapArr& nmap) { RayCaster rc; rc.Rcurr = Rcurr; rc.tcurr = tcurr; rc.time_step = tranc_dist * 0.8f; rc.volume_size = volume_size; rc.cell_size.x = volume_size.x / VOLUME_X; rc.cell_size.y = volume_size.y / VOLUME_Y; rc.cell_size.z = volume_size.z / VOLUME_Z; rc.cols = vmap.cols (); rc.rows = vmap.rows () / 3; rc.intr = intr; rc.volume = volume; rc.vmap = vmap; rc.nmap = nmap; dim3 block (RayCaster::CTA_SIZE_X, RayCaster::CTA_SIZE_Y); dim3 grid (divUp (rc.cols, block.x), divUp (rc.rows, block.y)); rayCastKernel<<<grid, block>>>(rc); cudaSafeCall (cudaGetLastError ()); //cudaSafeCall(cudaDeviceSynchronize()); }
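getMinTime and getMaxTime above are the two halves of the standard slab test against the box [0, volume_size]: per axis the ray enters through the nearer face and leaves through the farther one, and it is inside the volume between the latest entry and the earliest exit. A host-side restatement, offered as an editor's sketch rather than a copy of the device code:

#include <algorithm>
#include <cstdio>

// Slab test for a ray origin + t * dir against the box [0, bmax] on each axis,
// mirroring getMinTime/getMaxTime (direction components assumed non-zero).
struct Interval { float tmin, tmax; };

static Interval box_interval(const float o[3], const float d[3], const float bmax[3]) {
  Interval r{-1e30f, 1e30f};
  for (int a = 0; a < 3; ++a) {
    float t0 = (0.f - o[a]) / d[a];
    float t1 = (bmax[a] - o[a]) / d[a];
    if (t0 > t1) std::swap(t0, t1);   // nearer face first
    r.tmin = std::max(r.tmin, t0);    // latest entry across axes
    r.tmax = std::min(r.tmax, t1);    // earliest exit across axes
  }
  return r;                           // the ray hits the box iff tmin <= tmax
}

int main() {
  const float o[3] = {-1.f, 0.25f, 0.25f}, d[3] = {1.f, 0.25f, 0.25f}, b[3] = {1.f, 1.f, 1.f};
  Interval iv = box_interval(o, d, b);
  std::printf("enter %f exit %f\n", iv.tmin, iv.tmax);  // prints 1.0 and 2.0
  return 0;
}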
1d8ea1514c538c64d4a604f408f6fc51f8b7072b.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <iomanip> #include <ctime> #include <cstdlib> #include <cstdio> #include <cmath> #include "hip/hip_runtime.h" using namespace std; #define TPB 1024 #define min(a,b) ((a < b) ? a : b) __global__ void scat_part_sum(double * array, double * array_psums) { // Distributes the values from array_psums (array of partial sums) to every element // in the array. Every thread in a block gets the same partial sum added to it int tid = (blockIdx.x * blockDim.x) + threadIdx.x; //__syncthreads(); array[tid] += array_psums[blockIdx.x]; //__syncthreads(); } __global__ void upsweep (double * array, double * array_aggr1, int size) { // Performs an upsweep int bid = blockIdx.x * blockDim.x; int tid = (blockIdx.x * blockDim.x) + threadIdx.x; int min_size = min(size, TPB); __syncthreads(); // Merge elements like a binary tree for (int step = 2; step <= min_size ; step *= 2) { if (tid % step == (step - 1) && (tid - (step / 2) >= bid)) { array[tid] += array[tid - (step / 2)]; } __syncthreads(); } __syncthreads(); // Aggregates the sum of each block to another array for to calculate partial tums if (array_aggr1 != NULL) { if (threadIdx.x == (TPB - 1)) { if (tid < size) { array_aggr1[blockIdx.x] = array[tid]; } else { array_aggr1[blockIdx.x] = array[size - 1]; } } __syncthreads(); } } __global__ void excl_downsweep (double * array, int size) { int bsize = blockIdx.x * blockDim.x; int next_block = (blockIdx.x + 1) * blockDim.x; int tid = bsize + threadIdx.x; int tmp; int min_size = min(size, TPB); // Performs an exlusive down sweep. After the inclusive down sweep, each block // will have elements 0, 0 + a_1 , 0 + a_1 + a_2, ... , 0 + a_1 + a_2 + ... + a_1023 if (tid % TPB == 0) { array[min(size, next_block) - 1] = 0; } __syncthreads(); for (int step = min_size; step > 0; step /= 2) { if (tid % step == (step - 1) && (tid - (step / 2) >= bsize)) { tmp = array[tid]; array[tid] += array[tid - (step / 2)]; array[tid - (step / 2)] = tmp; } __syncthreads(); } } __global__ void incl_downsweep (double * array) { int next_bid = (blockIdx.x + 1) * blockDim.x; int tid = (blockIdx.x * blockDim.x) + threadIdx.x; __syncthreads(); // Performs an inclusive down sweep. After the inclusive down sweep, each block // will have elements a_1, a_1 + a_2, ... , a_1 + a_2 + ... + a_1024 for (int step = TPB / 2; step > 1; step /= 2) { if (tid % step == (step - 1) && (tid + (step / 2) < next_bid)) { array[tid + (step / 2)] += array[tid]; } __syncthreads(); } } void sum(double* a, double* b, const int n) { //Given an array a[0...n-1], you need to compute b[0...n-1], //where b[i] = (i+1)*a[0] + i*a[1] + ... + 2*a[i-1] + a[i] //Note that b is NOT initialized with 0, be careful! 
//Write your CUDA code starting from here //Add any functions (e.g., device function) you want within this file int size = n; int size_div1 = int(ceil(double(size) / double(TPB))); int size_div2 = int(ceil(double(size_div1) / double(TPB))); int nblocks = int(ceil(double(size) / double(TPB))); int nblocks_div1 = int(ceil(double(nblocks) / double(TPB))); int nblocks_div2 = int(ceil(double(nblocks_div1) / double(TPB))); double *d_x, *d_x1, *d_x2; hipMalloc(&d_x, size * sizeof(double)); hipMalloc(&d_x1, size_div1 * sizeof(double)); hipMalloc(&d_x2, size_div2 * sizeof(double)); hipMemcpy(d_x, a, size * sizeof(double), hipMemcpyHostToDevice); //hipMemcpy(d_x1, x1, size_div1 * sizeof(double), hipMemcpyHostToDevice); //hipMemcpy(d_x2, x2, size_div2 * sizeof(double), hipMemcpyHostToDevice); for (int pf_step = 1; pf_step < 3; pf_step++) { // cout << "Up-Sweep\n" << endl; hipLaunchKernelGGL(( upsweep) , dim3(nblocks), dim3(TPB), 0, 0, d_x, d_x1, size); hipDeviceSynchronize(); // cout << "Up-Sweep 2\n" << endl; hipLaunchKernelGGL(( upsweep) , dim3(nblocks_div1), dim3(TPB), 0, 0, d_x1, d_x2, size_div1); hipDeviceSynchronize(); // cout << "Up-Sweep 3\n" << endl; hipLaunchKernelGGL(( upsweep) , dim3(nblocks_div1), dim3(TPB), 0, 0, d_x2, NULL, size_div2); hipDeviceSynchronize(); // cout << "Down-Sweep 3\n" << endl; hipLaunchKernelGGL(( excl_downsweep) , dim3(nblocks_div2), dim3(TPB), 0, 0, d_x2, size_div2); hipDeviceSynchronize(); // cout << "Down-Sweep 2\n" << endl; hipLaunchKernelGGL(( excl_downsweep) , dim3(nblocks_div1), dim3(TPB), 0, 0, d_x1, size_div1); hipDeviceSynchronize(); // cout << "Down-Sweep\n" << endl; hipLaunchKernelGGL(( incl_downsweep) , dim3(nblocks), dim3(TPB), 0, 0, d_x); hipDeviceSynchronize(); // cout << "Scatter Partial Sums 2\n" << endl; hipLaunchKernelGGL(( scat_part_sum) , dim3(nblocks_div1), dim3(TPB), 0, 0, d_x1, d_x2); hipDeviceSynchronize(); // cout << "Scatter Partial Sums 1\n" << endl; hipLaunchKernelGGL(( scat_part_sum) , dim3(nblocks), dim3(TPB), 0, 0, d_x, d_x1); hipDeviceSynchronize(); hipMemcpy(b, d_x, size * sizeof(double), hipMemcpyDeviceToHost); } } int main(int argc, const char * argv[]) { if (argc != 2) { printf("The argument is wrong! Execute your program with only input file name!\n"); return 1; } int n = 1 << 24; //Dummy code for creating a random input vectors //Convenient for the text purpose //Please comment out when you submit your code!!!!!!!!! /* FILE *fpw = fopen(argv[1], "w"); if (fpw == NULL) { printf("The file can not be created!\n"); return 1; } //int n = 1 << 24; fprintf(fpw, "%d\n", n); srand(time(NULL)); for (int i=0; i<n; i++) fprintf(fpw, "%lg\n", ((double)(rand() % n))/100); fclose(fpw); printf("Finished writing\n"); */ //Read input from input file specified by user FILE* fpr = fopen(argv[1], "r"); if (fpr == NULL) { printf("The file can not be opened or does not exist!\n"); return 1; } //int n; fscanf(fpr, "%d\n", &n); printf("%d\n", n); double* a = (double*)malloc(n*sizeof(double)); double* b = (double*)malloc(n*sizeof(double)); for (int i=0; i<n; i++) { fscanf(fpr, "%lg\n", &a[i]); } fclose(fpr); //Main function sum(a, b, n); //Write b into output file FILE* fpo = fopen("output.txt","w"); if (fpo == NULL) { printf("The file can not be created!\n"); return 1; } fprintf(fpo, "%d\n", n); for (int i=0; i<n; i++) fprintf(fpo, "%lg\n", b[i]); fclose(fpo); free(a); free(b); printf("Done...\n"); return 0; }
1d8ea1514c538c64d4a604f408f6fc51f8b7072b.cu
#include <iostream> #include <iomanip> #include <ctime> #include <cstdlib> #include <cstdio> #include <cmath> #include "cuda_runtime.h" using namespace std; #define TPB 1024 #define min(a,b) ((a < b) ? a : b) __global__ void scat_part_sum(double * array, double * array_psums) { // Distributes the values from array_psums (array of partial sums) to every element // in the array. Every thread in a block gets the same partial sum added to it int tid = (blockIdx.x * blockDim.x) + threadIdx.x; //__syncthreads(); array[tid] += array_psums[blockIdx.x]; //__syncthreads(); } __global__ void upsweep (double * array, double * array_aggr1, int size) { // Performs an upsweep int bid = blockIdx.x * blockDim.x; int tid = (blockIdx.x * blockDim.x) + threadIdx.x; int min_size = min(size, TPB); __syncthreads(); // Merge elements like a binary tree for (int step = 2; step <= min_size ; step *= 2) { if (tid % step == (step - 1) && (tid - (step / 2) >= bid)) { array[tid] += array[tid - (step / 2)]; } __syncthreads(); } __syncthreads(); // Aggregates the sum of each block to another array for to calculate partial tums if (array_aggr1 != NULL) { if (threadIdx.x == (TPB - 1)) { if (tid < size) { array_aggr1[blockIdx.x] = array[tid]; } else { array_aggr1[blockIdx.x] = array[size - 1]; } } __syncthreads(); } } __global__ void excl_downsweep (double * array, int size) { int bsize = blockIdx.x * blockDim.x; int next_block = (blockIdx.x + 1) * blockDim.x; int tid = bsize + threadIdx.x; int tmp; int min_size = min(size, TPB); // Performs an exlusive down sweep. After the inclusive down sweep, each block // will have elements 0, 0 + a_1 , 0 + a_1 + a_2, ... , 0 + a_1 + a_2 + ... + a_1023 if (tid % TPB == 0) { array[min(size, next_block) - 1] = 0; } __syncthreads(); for (int step = min_size; step > 0; step /= 2) { if (tid % step == (step - 1) && (tid - (step / 2) >= bsize)) { tmp = array[tid]; array[tid] += array[tid - (step / 2)]; array[tid - (step / 2)] = tmp; } __syncthreads(); } } __global__ void incl_downsweep (double * array) { int next_bid = (blockIdx.x + 1) * blockDim.x; int tid = (blockIdx.x * blockDim.x) + threadIdx.x; __syncthreads(); // Performs an inclusive down sweep. After the inclusive down sweep, each block // will have elements a_1, a_1 + a_2, ... , a_1 + a_2 + ... + a_1024 for (int step = TPB / 2; step > 1; step /= 2) { if (tid % step == (step - 1) && (tid + (step / 2) < next_bid)) { array[tid + (step / 2)] += array[tid]; } __syncthreads(); } } void sum(double* a, double* b, const int n) { //Given an array a[0...n-1], you need to compute b[0...n-1], //where b[i] = (i+1)*a[0] + i*a[1] + ... + 2*a[i-1] + a[i] //Note that b is NOT initialized with 0, be careful! 
//Write your CUDA code starting from here //Add any functions (e.g., device function) you want within this file int size = n; int size_div1 = int(ceil(double(size) / double(TPB))); int size_div2 = int(ceil(double(size_div1) / double(TPB))); int nblocks = int(ceil(double(size) / double(TPB))); int nblocks_div1 = int(ceil(double(nblocks) / double(TPB))); int nblocks_div2 = int(ceil(double(nblocks_div1) / double(TPB))); double *d_x, *d_x1, *d_x2; cudaMalloc(&d_x, size * sizeof(double)); cudaMalloc(&d_x1, size_div1 * sizeof(double)); cudaMalloc(&d_x2, size_div2 * sizeof(double)); cudaMemcpy(d_x, a, size * sizeof(double), cudaMemcpyHostToDevice); //cudaMemcpy(d_x1, x1, size_div1 * sizeof(double), cudaMemcpyHostToDevice); //cudaMemcpy(d_x2, x2, size_div2 * sizeof(double), cudaMemcpyHostToDevice); for (int pf_step = 1; pf_step < 3; pf_step++) { // cout << "Up-Sweep\n" << endl; upsweep <<<nblocks, TPB>>> (d_x, d_x1, size); cudaDeviceSynchronize(); // cout << "Up-Sweep 2\n" << endl; upsweep <<<nblocks_div1, TPB>>> (d_x1, d_x2, size_div1); cudaDeviceSynchronize(); // cout << "Up-Sweep 3\n" << endl; upsweep <<<nblocks_div1, TPB>>> (d_x2, NULL, size_div2); cudaDeviceSynchronize(); // cout << "Down-Sweep 3\n" << endl; excl_downsweep <<<nblocks_div2, TPB>>> (d_x2, size_div2); cudaDeviceSynchronize(); // cout << "Down-Sweep 2\n" << endl; excl_downsweep <<<nblocks_div1, TPB>>> (d_x1, size_div1); cudaDeviceSynchronize(); // cout << "Down-Sweep\n" << endl; incl_downsweep <<<nblocks, TPB>>> (d_x); cudaDeviceSynchronize(); // cout << "Scatter Partial Sums 2\n" << endl; scat_part_sum <<<nblocks_div1, TPB>>> (d_x1, d_x2); cudaDeviceSynchronize(); // cout << "Scatter Partial Sums 1\n" << endl; scat_part_sum <<<nblocks, TPB>>> (d_x, d_x1); cudaDeviceSynchronize(); cudaMemcpy(b, d_x, size * sizeof(double), cudaMemcpyDeviceToHost); } } int main(int argc, const char * argv[]) { if (argc != 2) { printf("The argument is wrong! Execute your program with only input file name!\n"); return 1; } int n = 1 << 24; //Dummy code for creating a random input vectors //Convenient for the text purpose //Please comment out when you submit your code!!!!!!!!! /* FILE *fpw = fopen(argv[1], "w"); if (fpw == NULL) { printf("The file can not be created!\n"); return 1; } //int n = 1 << 24; fprintf(fpw, "%d\n", n); srand(time(NULL)); for (int i=0; i<n; i++) fprintf(fpw, "%lg\n", ((double)(rand() % n))/100); fclose(fpw); printf("Finished writing\n"); */ //Read input from input file specified by user FILE* fpr = fopen(argv[1], "r"); if (fpr == NULL) { printf("The file can not be opened or does not exist!\n"); return 1; } //int n; fscanf(fpr, "%d\n", &n); printf("%d\n", n); double* a = (double*)malloc(n*sizeof(double)); double* b = (double*)malloc(n*sizeof(double)); for (int i=0; i<n; i++) { fscanf(fpr, "%lg\n", &a[i]); } fclose(fpr); //Main function sum(a, b, n); //Write b into output file FILE* fpo = fopen("output.txt","w"); if (fpo == NULL) { printf("The file can not be created!\n"); return 1; } fprintf(fpo, "%d\n", n); for (int i=0; i<n; i++) fprintf(fpo, "%lg\n", b[i]); fclose(fpo); free(a); free(b); printf("Done...\n"); return 0; }
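Running an inclusive prefix sum twice over a[] yields exactly b[i] = (i+1)*a[0] + i*a[1] + ... + a[i]: the second scan adds up the prefixes p[0..i], and a[k] contributes to (i - k + 1) of them. That is why sum() drives the upsweep / downsweep / scatter pipeline for two pf_step passes. A short sequential reference (names are illustrative) that can be used to sanity-check the GPU output on small inputs:

#include <cstdio>
#include <vector>

// CPU reference for the GPU pipeline above: two inclusive prefix sums.
static void weighted_prefix_reference(const std::vector<double>& a,
                                      std::vector<double>& b) {
  std::vector<double> p(a.size());
  double run = 0.0;
  for (size_t i = 0; i < a.size(); ++i) { run += a[i]; p[i] = run; }  // 1st scan
  run = 0.0;
  for (size_t i = 0; i < a.size(); ++i) { run += p[i]; b[i] = run; }  // 2nd scan
}

int main() {
  std::vector<double> a = {1, 2, 3, 4}, b(4);
  weighted_prefix_reference(a, b);
  for (double v : b) printf("%g ", v);  // prints: 1 4 10 20
  printf("\n");
  return 0;
}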
777f29daba576d5cdf8ded1bca5477f762e0cc13.hip
// !!! This is a file automatically generated by hipify!!!
#include <primitiv/config.h>

#include <primitiv/devices/cuda/device.h>
#include <primitiv/devices/cuda/ops/common.h>
#include <primitiv/internal/cuda/utils.h>

namespace primitiv {
namespace devices {

void CUDA::random_normal_impl(float mean, float sd, Tensor &y) {
  CUDA_CALL(::hipSetDevice(dev_id_));
  CURAND_CALL(::hiprandGenerateNormal(
    state_->hiprand.get(), MDATA(y), y.shape().size(), mean, sd));
}

}  // namespace devices
}  // namespace primitiv
777f29daba576d5cdf8ded1bca5477f762e0cc13.cu
#include <primitiv/config.h>

#include <primitiv/devices/cuda/device.h>
#include <primitiv/devices/cuda/ops/common.h>
#include <primitiv/internal/cuda/utils.h>

namespace primitiv {
namespace devices {

void CUDA::random_normal_impl(float mean, float sd, Tensor &y) {
  CUDA_CALL(::cudaSetDevice(dev_id_));
  CURAND_CALL(::curandGenerateNormal(
    state_->curand.get(), MDATA(y), y.shape().size(), mean, sd));
}

}  // namespace devices
}  // namespace primitiv
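random_normal_impl above is a thin wrapper: it binds the device and asks the cuRAND host API (hipRAND in the ported file) to fill the tensor with normally distributed values. A minimal standalone sketch of the underlying curandGenerateNormal call, assuming a default pseudo-random generator and an illustrative buffer size; error checking is omitted for brevity:

#include <cuda_runtime.h>
#include <curand.h>
#include <cstdio>

int main() {
  const size_t n = 1024;  // even count, since normal variates come in pairs
  float* d_out = nullptr;
  cudaMalloc(&d_out, n * sizeof(float));

  curandGenerator_t gen;
  curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
  curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
  curandGenerateNormal(gen, d_out, n, 0.0f, 1.0f);  // mean 0, sd 1

  float h0;
  cudaMemcpy(&h0, d_out, sizeof(float), cudaMemcpyDeviceToHost);
  printf("first sample: %f\n", h0);

  curandDestroyGenerator(gen);
  cudaFree(d_out);
  return 0;
}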
1fb197bf408b4392de11c8de8ce73fa1768abd21.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" ///////////////////////////////////////////////////////////////////////////// /// Copyright 2020 Google LLC /// /// Licensed under the Apache License, Version 2.0 (the "License"); /// you may not use this file except in compliance with the License. /// You may obtain a copy of the License at /// /// https://www.apache.org/licenses/LICENSE-2.0 /// /// Unless required by applicable law or agreed to in writing, software /// distributed under the License is distributed on an "AS IS" BASIS, /// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. /// See the License for the specific language governing permissions and /// limitations under the License. ///////////////////////////////////////////////////////////////////////////// /// Modifications: pedro hermosilla ([email protected]) ///////////////////////////////////////////////////////////////////////////// #include "defines.hpp" #include "math_helper.cuh" #include "cuda_kernel_utils.cuh" #include "grid_utils.cuh" #include "compute_keys.cuh" ///////////////////////// GPU /** * GPU kernel to compute the keys of each point. * @param pNumPts Number of points. * @param pPts Array of points. * @param pBatchIds Array of batch ids. * @param pSAABBMin Array of scaled minimum point of bounding * boxes. * @param pNumCells Number of cells. * @param pInvCellSize Inverse cell size. * @param pOutKeys Output array with the point keys. */ template<int D> __global__ void compute_keys_gpu_kernel( const unsigned int pNumPts, const mccnn::fpoint<D>* __restrict__ pPts, const int* __restrict__ pBatchIds, const mccnn::fpoint<D>* __restrict__ pSAABBMin, const mccnn::ipoint<D>* __restrict__ pNumCells, const mccnn::fpoint<D>* __restrict__ pInvCellSize, mccnn::int64_m* __restrict__ pOutKeys) { int initPtIndex = mccnn::compute_global_index_gpu_funct(); int totalThreads = mccnn::compute_total_threads_gpu_funct(); for(int curPtIndex = initPtIndex; curPtIndex < pNumPts; curPtIndex += totalThreads) { //Get the values for the point. int curBatchId = pBatchIds[curPtIndex]; mccnn::fpoint<D> curPt = pPts[curPtIndex]; mccnn::fpoint<D> curSAABBMin = pSAABBMin[curBatchId]; //Compute the current cell indices. mccnn::ipoint<D> cell = mccnn::compute_cell_gpu_funct( curPt, curSAABBMin, pNumCells[0], pInvCellSize[0]); //Compute the key index of the cell. mccnn::int64_m keyIndex = mccnn::compute_key_gpu_funct( cell, pNumCells[0], curBatchId); //Save the key index. pOutKeys[curPtIndex] = keyIndex; } } ///////////////////////// CPU template<int D> void mccnn::compute_keys_gpu( std::unique_ptr<IGPUDevice>& pDevice, const unsigned int pNumPts, const float* pInGPUPtrPts, const int* pInGPUPtrBatchIds, const float* pInGPUPtrSAABBMin, const int* pInGPUPtrNumCells, const float* pInGPUPtrInvCellSizes, mccnn::int64_m* pOutGPUPtrKeys) { //Get the cuda stream. auto cudaStream = pDevice->getCUDAStream(); #ifdef DEBUG_INFO hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, cudaStream); #endif //Get the device properties. const GpuDeviceProperties& gpuProps = pDevice->get_device_properties(); //Calculate the ideal number of blocks for the selected block size. unsigned int numMP = gpuProps.numMPs_; unsigned int blockSize = gpuProps.warpSize_*2; unsigned int numBlocks = pDevice->get_max_active_block_x_sm( blockSize,(const void*)compute_keys_gpu_kernel<D>, 0); pDevice->check_error(__FILE__, __LINE__); //Calculate the total number of blocks to execute. 
unsigned int execBlocks = pNumPts/blockSize; execBlocks += (pNumPts%blockSize != 0)?1:0; unsigned int totalNumBlocks = numMP*numBlocks; totalNumBlocks = (totalNumBlocks > execBlocks)?execBlocks:totalNumBlocks; //Execute the cuda kernel. hipLaunchKernelGGL(( compute_keys_gpu_kernel<D>), dim3(totalNumBlocks), dim3(blockSize), 0, cudaStream, pNumPts, (const mccnn::fpoint<D>*)pInGPUPtrPts, pInGPUPtrBatchIds, (const mccnn::fpoint<D>*)pInGPUPtrSAABBMin, (const mccnn::ipoint<D>*)pInGPUPtrNumCells, (const mccnn::fpoint<D>*)pInGPUPtrInvCellSizes, pOutGPUPtrKeys); pDevice->check_error(__FILE__, __LINE__); #ifdef DEBUG_INFO hipEventRecord(stop, cudaStream); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); float gpuOccupancy = (float)(numBlocks*blockSize)/(float)gpuProps.maxThreadsXMP_; fprintf(stderr, "### COMPUTE KEYS ###\n"); fprintf(stderr, "Num points: %d\n", pNumPts); fprintf(stderr, "Occupancy: %f\n", gpuOccupancy); fprintf(stderr, "Execution time: %f\n", milliseconds); fprintf(stderr, "\n"); #endif } ///////////////////////// CPU Template declaration #define COMPUTE_KEYS_TEMP_DECL(Dims) \ template void mccnn::compute_keys_gpu<Dims>( \ std::unique_ptr<IGPUDevice>& pDevice, \ const unsigned int pNumPts, \ const float* pInGPUPtrPts, \ const int* pInGPUPtrBatchIds, \ const float* pInGPUPtrSAABBMin, \ const int* pInGPUPtrNumCells, \ const float* pInGPUPtrInvCellSizes, \ mccnn::int64_m* pOutGPUPtrKeys); DECLARE_TEMPLATE_DIMS(COMPUTE_KEYS_TEMP_DECL)
1fb197bf408b4392de11c8de8ce73fa1768abd21.cu
///////////////////////////////////////////////////////////////////////////// /// Copyright 2020 Google LLC /// /// Licensed under the Apache License, Version 2.0 (the "License"); /// you may not use this file except in compliance with the License. /// You may obtain a copy of the License at /// /// https://www.apache.org/licenses/LICENSE-2.0 /// /// Unless required by applicable law or agreed to in writing, software /// distributed under the License is distributed on an "AS IS" BASIS, /// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. /// See the License for the specific language governing permissions and /// limitations under the License. ///////////////////////////////////////////////////////////////////////////// /// Modifications: pedro hermosilla ([email protected]) ///////////////////////////////////////////////////////////////////////////// #include "defines.hpp" #include "math_helper.cuh" #include "cuda_kernel_utils.cuh" #include "grid_utils.cuh" #include "compute_keys.cuh" ///////////////////////// GPU /** * GPU kernel to compute the keys of each point. * @param pNumPts Number of points. * @param pPts Array of points. * @param pBatchIds Array of batch ids. * @param pSAABBMin Array of scaled minimum point of bounding * boxes. * @param pNumCells Number of cells. * @param pInvCellSize Inverse cell size. * @param pOutKeys Output array with the point keys. */ template<int D> __global__ void compute_keys_gpu_kernel( const unsigned int pNumPts, const mccnn::fpoint<D>* __restrict__ pPts, const int* __restrict__ pBatchIds, const mccnn::fpoint<D>* __restrict__ pSAABBMin, const mccnn::ipoint<D>* __restrict__ pNumCells, const mccnn::fpoint<D>* __restrict__ pInvCellSize, mccnn::int64_m* __restrict__ pOutKeys) { int initPtIndex = mccnn::compute_global_index_gpu_funct(); int totalThreads = mccnn::compute_total_threads_gpu_funct(); for(int curPtIndex = initPtIndex; curPtIndex < pNumPts; curPtIndex += totalThreads) { //Get the values for the point. int curBatchId = pBatchIds[curPtIndex]; mccnn::fpoint<D> curPt = pPts[curPtIndex]; mccnn::fpoint<D> curSAABBMin = pSAABBMin[curBatchId]; //Compute the current cell indices. mccnn::ipoint<D> cell = mccnn::compute_cell_gpu_funct( curPt, curSAABBMin, pNumCells[0], pInvCellSize[0]); //Compute the key index of the cell. mccnn::int64_m keyIndex = mccnn::compute_key_gpu_funct( cell, pNumCells[0], curBatchId); //Save the key index. pOutKeys[curPtIndex] = keyIndex; } } ///////////////////////// CPU template<int D> void mccnn::compute_keys_gpu( std::unique_ptr<IGPUDevice>& pDevice, const unsigned int pNumPts, const float* pInGPUPtrPts, const int* pInGPUPtrBatchIds, const float* pInGPUPtrSAABBMin, const int* pInGPUPtrNumCells, const float* pInGPUPtrInvCellSizes, mccnn::int64_m* pOutGPUPtrKeys) { //Get the cuda stream. auto cudaStream = pDevice->getCUDAStream(); #ifdef DEBUG_INFO cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, cudaStream); #endif //Get the device properties. const GpuDeviceProperties& gpuProps = pDevice->get_device_properties(); //Calculate the ideal number of blocks for the selected block size. unsigned int numMP = gpuProps.numMPs_; unsigned int blockSize = gpuProps.warpSize_*2; unsigned int numBlocks = pDevice->get_max_active_block_x_sm( blockSize,(const void*)compute_keys_gpu_kernel<D>, 0); pDevice->check_error(__FILE__, __LINE__); //Calculate the total number of blocks to execute. 
unsigned int execBlocks = pNumPts/blockSize; execBlocks += (pNumPts%blockSize != 0)?1:0; unsigned int totalNumBlocks = numMP*numBlocks; totalNumBlocks = (totalNumBlocks > execBlocks)?execBlocks:totalNumBlocks; //Execute the cuda kernel. compute_keys_gpu_kernel<D><<<totalNumBlocks, blockSize, 0, cudaStream>>>( pNumPts, (const mccnn::fpoint<D>*)pInGPUPtrPts, pInGPUPtrBatchIds, (const mccnn::fpoint<D>*)pInGPUPtrSAABBMin, (const mccnn::ipoint<D>*)pInGPUPtrNumCells, (const mccnn::fpoint<D>*)pInGPUPtrInvCellSizes, pOutGPUPtrKeys); pDevice->check_error(__FILE__, __LINE__); #ifdef DEBUG_INFO cudaEventRecord(stop, cudaStream); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); float gpuOccupancy = (float)(numBlocks*blockSize)/(float)gpuProps.maxThreadsXMP_; fprintf(stderr, "### COMPUTE KEYS ###\n"); fprintf(stderr, "Num points: %d\n", pNumPts); fprintf(stderr, "Occupancy: %f\n", gpuOccupancy); fprintf(stderr, "Execution time: %f\n", milliseconds); fprintf(stderr, "\n"); #endif } ///////////////////////// CPU Template declaration #define COMPUTE_KEYS_TEMP_DECL(Dims) \ template void mccnn::compute_keys_gpu<Dims>( \ std::unique_ptr<IGPUDevice>& pDevice, \ const unsigned int pNumPts, \ const float* pInGPUPtrPts, \ const int* pInGPUPtrBatchIds, \ const float* pInGPUPtrSAABBMin, \ const int* pInGPUPtrNumCells, \ const float* pInGPUPtrInvCellSizes, \ mccnn::int64_m* pOutGPUPtrKeys); DECLARE_TEMPLATE_DIMS(COMPUTE_KEYS_TEMP_DECL)
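compute_keys_gpu sizes its launch by capping the grid at min(execBlocks, numMP * activeBlocksPerSM) and letting the kernel's grid-stride loop cover all points. A self-contained sketch of the same pattern with a trivial stand-in kernel (scaleKernel, the block size of 64 and the buffer are illustrative, not part of the library):

#include <cuda_runtime.h>
#include <cstdio>

// Grid-stride kernel: each thread steps by gridDim.x * blockDim.x, so any
// grid size covers all n elements.
__global__ void scaleKernel(float* data, int n, float s) {
  int stride = gridDim.x * blockDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
    data[i] *= s;
}

int main() {
  const int n = 1 << 20;
  float* d = nullptr;
  cudaMalloc(&d, n * sizeof(float));
  cudaMemset(d, 0, n * sizeof(float));

  const int blockSize = 64;  // warpSize * 2, as in the launcher above
  int device = 0, numMP = 0, blocksPerSM = 0;
  cudaGetDevice(&device);
  cudaDeviceGetAttribute(&numMP, cudaDevAttrMultiProcessorCount, device);
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blocksPerSM, scaleKernel,
                                                blockSize, 0);

  // Same capping logic as above: no more blocks than needed to cover n, and
  // no more than the device can keep resident at once.
  int execBlocks = (n + blockSize - 1) / blockSize;
  int grid = numMP * blocksPerSM;
  if (grid > execBlocks) grid = execBlocks;

  scaleKernel<<<grid, blockSize>>>(d, n, 2.0f);
  cudaDeviceSynchronize();
  printf("launched %d blocks of %d threads\n", grid, blockSize);
  cudaFree(d);
  return 0;
}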
b70785ffb107c91fd0ecbdf2249b9220d740e7b1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cassert> #include <cstdio> #include <cfloat> #include <cinttypes> #include <algorithm> #include <memory> #include <hiprand/hiprand_kernel.h> #include "private.h" #include "metric_abstraction.h" #include "tricks.cuh" #define BS_KMPP 1024 #define BS_AFKMC2_Q 512 #define BS_AFKMC2_R 512 #define BS_AFKMC2_MDT 512 #define BS_LL_ASS 128 #define BS_LL_CNT 256 #define BS_YY_INI 128 #define BS_YY_GFL 512 #define BS_YY_LFL 512 #define BLOCK_SIZE 1024 // for all the rest of the kernels #define SHMEM_AFKMC2_RC 8191 // in float-s, the actual value is +1 #define SHMEM_AFKMC2_MT 8192 #define YINYANG_GROUP_TOLERANCE 0.02 #define YINYANG_DRAFT_REASSIGNMENTS 0.11 #define YINYANG_REFRESH_EPSILON 1e-4 __device__ uint32_t d_changed_number; __device__ uint32_t d_passed_number; __constant__ uint32_t d_samples_size; __constant__ uint32_t d_clusters_size; __constant__ uint32_t d_yy_groups_size; __constant__ int d_shmem_size; //////////////////////---------------------------------------------------------- // Device functions //---------------------------------------------------------- //////////////////////---------------------------------------------------------- template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_plus_plus( const uint32_t offset, const uint32_t length, const uint32_t cc, const F *__restrict__ samples, const F *__restrict__ centroids, float *__restrict__ dists, atomic_float *__restrict__ dists_sum) { uint32_t sample = blockIdx.x * blockDim.x + threadIdx.x; float dist = 0; if (sample < length) { centroids += (cc - 1) * d_features_size; const uint32_t local_sample = sample + offset; if (_eq(samples[local_sample], samples[local_sample])) { dist = METRIC<M, F>::distance_t( samples, centroids, d_samples_size, local_sample); } float prev_dist; if (cc == 1 || dist < (prev_dist = dists[sample])) { dists[sample] = dist; } else { dist = prev_dist; } } dist = warpReduceSum(dist); if (threadIdx.x % 32 == 0) { atomicAdd(dists_sum, dist); } } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_afkmc2_calc_q_dists( const uint32_t offset, const uint32_t length, uint32_t c1_index, const F *__restrict__ samples, float *__restrict__ dists, atomic_float *__restrict__ dsum) { volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x; float dist = 0; if (sample < length) { sample += offset; extern __shared__ float shmem_afkmc2[]; auto c1 = reinterpret_cast<F*>(shmem_afkmc2); uint16_t size_each = dupper(d_features_size, static_cast<uint16_t>(blockDim.x)); for (uint16_t i = size_each * threadIdx.x; i < min(size_each * (threadIdx.x + 1), d_features_size); i++) { c1[i] = samples[static_cast<uint64_t>(c1_index) * d_features_size + i]; } __syncthreads(); dist = METRIC<M, F>::distance_t(samples, c1, d_samples_size, sample); dist *= dist; dists[sample] = dist; } float sum = warpReduceSum(dist); if (threadIdx.x % 32 == 0) { atomicAdd(dsum, sum); } } __global__ void kmeans_afkmc2_calc_q( const uint32_t offset, const uint32_t length, float dsum, float *__restrict__ q) { volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x; if (sample >= length) { return; } sample += offset; q[sample] = 1 / (2.f * d_samples_size) + q[sample] / (2 * dsum); } __global__ void kmeans_afkmc2_random_step( const uint32_t m, const uint64_t seed, const uint64_t seq, const float *__restrict__ q, uint32_t *__restrict__ choices, float *__restrict__ samples) { volatile uint32_t ti = blockIdx.x * blockDim.x + 
threadIdx.x; hiprandState_t state; hiprand_init(seed, ti, seq, &state); float part = hiprand_uniform(&state); if (ti < m) { samples[ti] = hiprand_uniform(&state); } float accum = 0, corr = 0; bool found = false; __shared__ float shared_q[SHMEM_AFKMC2_RC + 1]; int32_t *all_found = reinterpret_cast<int32_t*>(shared_q + SHMEM_AFKMC2_RC); *all_found = blockDim.x; const uint32_t size_each = dupper( static_cast<unsigned>(SHMEM_AFKMC2_RC), blockDim.x); for (uint32_t sample = 0; sample < d_samples_size; sample += SHMEM_AFKMC2_RC) { __syncthreads(); if (*all_found == 0) { return; } for (uint32_t i = 0, si = threadIdx.x * size_each; i < size_each && (si = threadIdx.x * size_each + i) < SHMEM_AFKMC2_RC && (sample + si) < d_samples_size; i++) { shared_q[si] = q[sample + si]; } __syncthreads(); if (!found) { int i = 0; #pragma unroll 4 for (; i < SHMEM_AFKMC2_RC && accum < part && sample + i < d_samples_size; i++) { // Kahan summation with inverted c float y = _add(corr, shared_q[i]); float t = accum + y; corr = y - (t - accum); accum = t; } if (accum >= part) { if (ti < m) { choices[ti] = sample + i - 1; } found = true; atomicSub(all_found, 1); } } } } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_afkmc2_min_dist( const uint32_t m, const uint32_t k, const F *__restrict__ samples, const uint32_t *__restrict__ choices, const F *__restrict__ centroids, float *__restrict__ min_dists) { uint32_t chi = blockIdx.x * blockDim.x + threadIdx.x; if (chi >= m) { return; } float min_dist = FLT_MAX; for (uint32_t c = 0; c < k; c++) { float dist = METRIC<M, F>::distance_t( samples, centroids + c * d_features_size, d_samples_size, choices[chi]); if (dist < min_dist) { min_dist = dist; } } min_dists[chi] = min_dist * min_dist; } // min_dists must be set to FLT_MAX or +inf or NAN! 
template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_afkmc2_min_dist_transposed( const uint32_t m, const uint32_t k, const F *__restrict__ samples, const uint32_t *__restrict__ choices, const F *__restrict__ centroids, float *__restrict__ min_dists) { uint32_t c = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ float shared_min_dists[]; uint32_t size_each = dupper(m, blockDim.x); for (uint32_t i = size_each * threadIdx.x; i < min(size_each * (threadIdx.x + 1), m); i++) { shared_min_dists[i] = FLT_MAX; } __syncthreads(); for (uint32_t chi = 0; chi < m; chi++) { float dist = FLT_MAX; if (c < k) { dist = METRIC<M, F>::distance_t( samples, centroids + c * d_features_size, d_samples_size, choices[chi]); } float warp_min = warpReduceMin(dist); warp_min *= warp_min; if (threadIdx.x % 32 == 0 && c < k) { atomicMin(shared_min_dists + chi, warp_min); } } __syncthreads(); if (threadIdx.x == 0) { for (uint32_t chi = 0; chi < m; chi++) { atomicMin(min_dists + chi, shared_min_dists[chi]); } } } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_assign_lloyd_smallc( const uint32_t offset, const uint32_t length, const F *__restrict__ samples, const F *__restrict__ centroids, uint32_t *__restrict__ assignments_prev, uint32_t * __restrict__ assignments) { using HF = typename HALF<F>::type; uint32_t sample = blockIdx.x * blockDim.x + threadIdx.x; if (sample >= length) { return; } HF min_dist = _fmax<HF>(); uint32_t nearest = UINT32_MAX; extern __shared__ float _shared_samples[]; F *shared_samples = reinterpret_cast<F *>(_shared_samples); F *shared_centroids = shared_samples + blockDim.x * d_features_size; const uint32_t cstep = (d_shmem_size - blockDim.x * d_features_size) / (d_features_size + 1); F *csqrs = shared_centroids + cstep * d_features_size; const uint32_t size_each = cstep / min(blockDim.x, length - blockIdx.x * blockDim.x) + 1; const uint32_t local_sample = sample + offset; bool insane = _neq(samples[local_sample], samples[local_sample]); const uint32_t soffset = threadIdx.x * d_features_size; if (!insane) { for (uint64_t f = 0; f < d_features_size; f++) { shared_samples[soffset + f] = samples[f * d_samples_size + local_sample]; } } for (uint32_t gc = 0; gc < d_clusters_size; gc += cstep) { uint32_t coffset = gc * d_features_size; __syncthreads(); for (uint32_t i = 0; i < size_each; i++) { uint32_t ci = threadIdx.x * size_each + i; uint32_t local_offset = ci * d_features_size; uint32_t global_offset = coffset + local_offset; if (global_offset < d_clusters_size * d_features_size && ci < cstep) { csqrs[ci] = METRIC<M, F>::sum_squares( centroids + global_offset, shared_centroids + local_offset); } } __syncthreads(); if (insane) { continue; } for (uint32_t c = gc; c < gc + cstep && c < d_clusters_size; c++) { F product = _const<F>(0), corr = _const<F>(0); coffset = (c - gc) * d_features_size; #pragma unroll 4 for (int f = 0; f < d_features_size; f++) { F y = _fma(corr, shared_samples[soffset + f], shared_centroids[coffset + f]); F t = _add(product, y); corr = _sub(y, _sub(t, product)); product = t; } HF dist = METRIC<M, F>::distance(_const<F>(0), csqrs[c - gc], product); if (_lt(dist, min_dist)) { min_dist = dist; nearest = c; } } } if (nearest == UINT32_MAX) { if (!insane) { #ifndef R_DEBUG printf("CUDA kernel kmeans_assign: nearest neighbor search failed for " "sample %" PRIu32 "\n", sample); #endif return; } else { nearest = d_clusters_size; } } uint32_t ass = assignments[sample]; assignments_prev[sample] = ass; if (ass != nearest) { assignments[sample] = 
nearest; atomicAggInc(&d_changed_number); } } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_assign_lloyd( const uint32_t offset, const uint32_t length, const F *__restrict__ samples, const F *__restrict__ centroids, uint32_t *__restrict__ assignments_prev, uint32_t * __restrict__ assignments) { using HF = typename HALF<F>::type; uint32_t sample = blockIdx.x * blockDim.x + threadIdx.x; if (sample >= length) { return; } HF min_dist = _fmax<HF>(); uint32_t nearest = UINT32_MAX; extern __shared__ float _shared_centroids[]; F *shared_centroids = reinterpret_cast<F *>(_shared_centroids); const uint32_t cstep = d_shmem_size / (d_features_size + 1); F *csqrs = shared_centroids + cstep * d_features_size; const uint32_t size_each = cstep / min(blockDim.x, length - blockIdx.x * blockDim.x) + 1; const uint32_t local_sample = sample + offset; bool insane = _neq(samples[local_sample], samples[local_sample]); for (uint32_t gc = 0; gc < d_clusters_size; gc += cstep) { uint32_t coffset = gc * d_features_size; __syncthreads(); for (uint32_t i = 0; i < size_each; i++) { uint32_t ci = threadIdx.x * size_each + i; uint32_t local_offset = ci * d_features_size; uint32_t global_offset = coffset + local_offset; if (global_offset < d_clusters_size * d_features_size && ci < cstep) { csqrs[ci] = METRIC<M, F>::sum_squares( centroids + global_offset, shared_centroids + local_offset); } } __syncthreads(); if (insane) { continue; } for (uint32_t c = gc; c < gc + cstep && c < d_clusters_size; c++) { F product = _const<F>(0), corr = _const<F>(0); coffset = (c - gc) * d_features_size; #pragma unroll 4 for (uint64_t f = 0; f < d_features_size; f++) { F y = _fma(corr, samples[static_cast<uint64_t>(d_samples_size) * f + local_sample], shared_centroids[coffset + f]); F t = _add(product, y); corr = _sub(y, _sub(t, product)); product = t; } HF dist = METRIC<M, F>::distance(_const<F>(0), csqrs[c - gc], product); if (_lt(dist, min_dist)) { min_dist = dist; nearest = c; } } } if (nearest == UINT32_MAX) { if (!insane) { #ifndef R_DEBUG printf("CUDA kernel kmeans_assign: nearest neighbor search failed for " "sample %" PRIu32 "\n", sample); #endif return; } else { nearest = d_clusters_size; } } uint32_t ass = assignments[sample]; assignments_prev[sample] = ass; if (ass != nearest) { assignments[sample] = nearest; atomicAggInc(&d_changed_number); } } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_adjust( const uint32_t coffset, const uint32_t length, const F *__restrict__ samples, const uint32_t *__restrict__ assignments_prev, const uint32_t *__restrict__ assignments, F *__restrict__ centroids, uint32_t *__restrict__ ccounts) { uint32_t c = blockIdx.x * blockDim.x + threadIdx.x; if (c >= length) { return; } c += coffset; uint32_t my_count = ccounts[c]; { F fmy_count = _const<F>(my_count); centroids += c * d_features_size; for (int f = 0; f < d_features_size; f++) { centroids[f] = _mul(centroids[f], fmy_count); } } extern __shared__ uint32_t ass[]; int step = d_shmem_size / 2; F corr = _const<F>(0); for (uint32_t sbase = 0; sbase < d_samples_size; sbase += step) { __syncthreads(); if (threadIdx.x == 0) { int pos = sbase; for (int i = 0; i < step && sbase + i < d_samples_size; i++) { ass[2 * i] = assignments[pos + i]; ass[2 * i + 1] = assignments_prev[pos + i]; } } __syncthreads(); for (int i = 0; i < step && sbase + i < d_samples_size; i++) { uint32_t this_ass = ass[2 * i]; uint32_t prev_ass = ass[2 * i + 1]; int sign = 0; if (prev_ass == c && this_ass != c) { sign = -1; my_count--; } else if 
(prev_ass != c && this_ass == c) { sign = 1; my_count++; } if (sign != 0) { F fsign = _const<F>(sign); #pragma unroll 4 for (uint64_t f = 0; f < d_features_size; f++) { F centroid = centroids[f]; F y = _fma(corr, samples[static_cast<uint64_t>(d_samples_size) * f + sbase + i], fsign); F t = _add(centroid, y); corr = _sub(y, _sub(t, centroid)); centroids[f] = t; } } } } // my_count can be 0 => we get NaN with L2 and never use this cluster again // this is a feature, not a bug METRIC<M, F>::normalize(my_count, centroids); ccounts[c] = my_count; } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_yy_init( const uint32_t offset, const uint32_t length, const F *__restrict__ samples, const F *__restrict__ centroids, const uint32_t *__restrict__ assignments, const uint32_t *__restrict__ groups, float *__restrict__ volatile bounds) { volatile uint32_t sample = blockIdx.x * blockDim.x + threadIdx.x; if (sample >= length) { return; } for (uint32_t i = 0; i < d_yy_groups_size + 1; i++) { bounds[static_cast<uint64_t>(length) * i + sample] = FLT_MAX; } uint32_t nearest = assignments[sample]; extern __shared__ float shared_memory[]; F *volatile shared_centroids = reinterpret_cast<F*>(shared_memory); const uint32_t cstep = d_shmem_size / d_features_size; const uint32_t size_each = cstep / min(blockDim.x, length - blockIdx.x * blockDim.x) + 1; for (uint32_t gc = 0; gc < d_clusters_size; gc += cstep) { uint32_t coffset = gc * d_features_size; __syncthreads(); for (uint32_t i = 0; i < size_each; i++) { uint32_t ci = threadIdx.x * size_each + i; uint32_t local_offset = ci * d_features_size; uint32_t global_offset = coffset + local_offset; if (global_offset < d_clusters_size * d_features_size && ci < cstep) { #pragma unroll 4 for (int f = 0; f < d_features_size; f++) { shared_centroids[local_offset + f] = centroids[global_offset + f]; } } } __syncthreads(); for (uint32_t c = gc; c < gc + cstep && c < d_clusters_size; c++) { uint32_t group = groups[c]; if (group >= d_yy_groups_size) { // this may happen if the centroid is insane (NaN) continue; } float dist = METRIC<M, F>::distance_t( samples, shared_centroids + (c - gc) * d_features_size, d_samples_size, sample + offset); if (c != nearest) { uint64_t gindex = static_cast<uint64_t>(length) * (1 + group) + sample; if (dist < bounds[gindex]) { bounds[gindex] = dist; } } else { bounds[sample] = dist; } } } } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_yy_calc_drifts( const uint32_t offset, const uint32_t length, const F *__restrict__ centroids, F *__restrict__ drifts) { uint32_t c = blockIdx.x * blockDim.x + threadIdx.x; if (c >= length) { return; } c += offset; uint32_t coffset = c * d_features_size; (reinterpret_cast<float *>(drifts))[d_clusters_size * d_features_size + c] = METRIC<M, F>::distance(centroids + coffset, drifts + coffset); } __global__ void kmeans_yy_find_group_max_drifts( const uint32_t offset, const uint32_t length, const uint32_t *__restrict__ groups, float *__restrict__ drifts) { uint32_t group = blockIdx.x * blockDim.x + threadIdx.x; if (group >= length) { return; } group += offset; const uint32_t doffset = d_clusters_size * d_features_size; const uint32_t step = d_shmem_size / 2; const uint32_t size_each = d_shmem_size / (2 * min(blockDim.x, length - blockIdx.x * blockDim.x)); extern __shared__ uint32_t shmem[]; float *cd = (float *)shmem; uint32_t *cg = shmem + step; float my_max = -FLT_MAX; for (uint32_t offset = 0; offset < d_clusters_size; offset += step) { __syncthreads(); for (uint32_t i = 0; 
i < size_each; i++) { uint32_t local_offset = threadIdx.x * size_each + i; uint32_t global_offset = offset + local_offset; if (global_offset < d_clusters_size && local_offset < step) { cd[local_offset] = drifts[doffset + global_offset]; cg[local_offset] = groups[global_offset]; } } __syncthreads(); for (uint32_t i = 0; i < step; i++) { if (cg[i] == group) { float d = cd[i]; if (my_max < d) { my_max = d; } } } } drifts[group] = my_max; } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_yy_global_filter( const uint32_t offset, const uint32_t length, const F *__restrict__ samples, const F *__restrict__ centroids, const uint32_t *__restrict__ groups, const float *__restrict__ drifts, const uint32_t *__restrict__ assignments, uint32_t *__restrict__ assignments_prev, float *__restrict__ bounds, uint32_t *__restrict__ passed) { volatile uint32_t sample = blockIdx.x * blockDim.x + threadIdx.x; if (sample >= length) { return; } uint32_t cluster = assignments[sample]; assignments_prev[sample] = cluster; float upper_bound = bounds[sample]; uint32_t doffset = d_clusters_size * d_features_size; float cluster_drift = drifts[doffset + cluster]; upper_bound += cluster_drift; float min_lower_bound = FLT_MAX; for (uint32_t g = 0; g < d_yy_groups_size; g++) { uint64_t gindex = static_cast<uint64_t>(length) * (1 + g) + sample; float lower_bound = bounds[gindex] - drifts[g]; bounds[gindex] = lower_bound; if (lower_bound < min_lower_bound) { min_lower_bound = lower_bound; } } // group filter try #1 if (min_lower_bound >= upper_bound) { bounds[sample] = upper_bound; return; } upper_bound = 0; upper_bound = METRIC<M, F>::distance_t( samples, centroids + cluster * d_features_size, d_samples_size, sample + offset); bounds[sample] = upper_bound; // group filter try #2 if (min_lower_bound >= upper_bound) { return; } // d'oh! 
passed[atomicAggInc(&d_passed_number)] = sample; } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_yy_local_filter( const uint32_t offset, const uint32_t length, const F *__restrict__ samples, const uint32_t *__restrict__ passed, const F *__restrict__ centroids, const uint32_t *__restrict__ groups, const float *__restrict__ drifts, uint32_t *__restrict__ assignments, float *__restrict__ bounds) { volatile uint32_t sample = blockIdx.x * blockDim.x + threadIdx.x; if (sample >= d_passed_number) { return; } sample = passed[sample]; float upper_bound = bounds[sample]; uint32_t cluster = assignments[sample]; uint32_t doffset = d_clusters_size * d_features_size; float min_dist = upper_bound, second_min_dist = FLT_MAX; uint32_t nearest = cluster; extern __shared__ float shared_memory[]; F *volatile shared_centroids = reinterpret_cast<F*>(shared_memory); const uint32_t cstep = d_shmem_size / d_features_size; const uint32_t size_each = cstep / min(blockDim.x, d_passed_number - blockIdx.x * blockDim.x) + 1; for (uint32_t gc = 0; gc < d_clusters_size; gc += cstep) { uint32_t coffset = gc * d_features_size; __syncthreads(); for (uint32_t i = 0; i < size_each; i++) { uint32_t ci = threadIdx.x * size_each + i; uint32_t local_offset = ci * d_features_size; uint32_t global_offset = coffset + local_offset; if (global_offset < d_clusters_size * d_features_size && ci < cstep) { #pragma unroll 4 for (int f = 0; f < d_features_size; f++) { shared_centroids[local_offset + f] = centroids[global_offset + f]; } } } __syncthreads(); for (uint32_t c = gc; c < gc + cstep && c < d_clusters_size; c++) { if (c == cluster) { continue; } uint32_t group = groups[c]; if (group >= d_yy_groups_size) { // this may happen if the centroid is insane (NaN) continue; } float lower_bound = bounds[ static_cast<uint64_t>(length) * (1 + group) + sample]; if (lower_bound >= upper_bound) { if (lower_bound < second_min_dist) { second_min_dist = lower_bound; } continue; } lower_bound += drifts[group] - drifts[doffset + c]; if (second_min_dist < lower_bound) { continue; } float dist = METRIC<M, F>::distance_t( samples, shared_centroids + (c - gc) * d_features_size, d_samples_size, sample + offset); if (dist < min_dist) { second_min_dist = min_dist; min_dist = dist; nearest = c; } else if (dist < second_min_dist) { second_min_dist = dist; } } } uint32_t nearest_group = groups[nearest]; uint32_t previous_group = groups[cluster]; bounds[static_cast<uint64_t>(length) * (1 + nearest_group) + sample] = second_min_dist; if (nearest_group != previous_group) { uint64_t gindex = static_cast<uint64_t>(length) * (1 + previous_group) + sample; float pb = bounds[gindex]; if (pb > upper_bound) { bounds[gindex] = upper_bound; } } bounds[sample] = min_dist; if (cluster != nearest) { assignments[sample] = nearest; atomicAggInc(&d_changed_number); } } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_calc_average_distance( uint32_t offset, uint32_t length, const F *__restrict__ samples, const F *__restrict__ centroids, const uint32_t *__restrict__ assignments, atomic_float *distance) { volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x; float dist = 0; if (sample < length) { sample += offset; dist = METRIC<M, F>::distance_t( samples, centroids + assignments[sample] * d_features_size, d_samples_size, sample); } float sum = warpReduceSum(dist); if (threadIdx.x % 32 == 0) { atomicAdd(distance, sum); } } ////////////////////------------------------------------------------------------ // Host functions 
//------------------------------------------------------------ ////////////////////------------------------------------------------------------ static int check_changed(int iter, float tolerance, uint32_t h_samples_size, const std::vector<int> &devs, int32_t verbosity) { uint32_t overall_changed = 0; FOR_EACH_DEV( uint32_t my_changed = 0; CUCH(hipMemcpyFromSymbol(&my_changed, d_changed_number, sizeof(my_changed)), kmcudaMemoryCopyError); overall_changed += my_changed; ); INFO("iteration %d: %" PRIu32 " reassignments\n", iter, overall_changed); if (overall_changed <= tolerance * h_samples_size) { return -1; } #ifndef R_DEBUG assert(overall_changed <= h_samples_size); #endif uint32_t zero = 0; FOR_EACH_DEV( CUCH(hipMemcpyToSymbolAsync(d_changed_number, &zero, sizeof(zero)), kmcudaMemoryCopyError); ); return kmcudaSuccess; } static KMCUDAResult prepare_mem( uint32_t h_samples_size, uint32_t h_clusters_size, bool resume, const std::vector<int> &devs, int verbosity, udevptrs<uint32_t> *ccounts, udevptrs<uint32_t> *assignments, udevptrs<uint32_t> *assignments_prev, std::vector<uint32_t> *shmem_sizes) { uint32_t zero = 0; shmem_sizes->clear(); FOR_EACH_DEVI( uint32_t h_shmem_size; CUCH(hipMemcpyFromSymbol(&h_shmem_size, d_shmem_size, sizeof(h_shmem_size)), kmcudaMemoryCopyError); shmem_sizes->push_back(h_shmem_size * sizeof(uint32_t)); CUCH(hipMemcpyToSymbolAsync(d_changed_number, &zero, sizeof(zero)), kmcudaMemoryCopyError); if (!resume) { CUCH(hipMemsetAsync((*ccounts)[devi].get(), 0, h_clusters_size * sizeof(uint32_t)), kmcudaRuntimeError); CUCH(hipMemsetAsync((*assignments)[devi].get(), 0xff, h_samples_size * sizeof(uint32_t)), kmcudaRuntimeError); CUCH(hipMemsetAsync((*assignments_prev)[devi].get(), 0xff, h_samples_size * sizeof(uint32_t)), kmcudaRuntimeError); } ); return kmcudaSuccess; } extern "C" { KMCUDAResult kmeans_cuda_setup( uint32_t h_samples_size, uint16_t h_features_size, uint32_t h_clusters_size, uint32_t h_yy_groups_size, const std::vector<int> &devs, int32_t verbosity) { FOR_EACH_DEV( CUCH(hipMemcpyToSymbol(d_samples_size, &h_samples_size, sizeof(h_samples_size)), kmcudaMemoryCopyError); CUCH(hipMemcpyToSymbol(d_features_size, &h_features_size, sizeof(h_features_size)), kmcudaMemoryCopyError); CUCH(hipMemcpyToSymbol(d_clusters_size, &h_clusters_size, sizeof(h_clusters_size)), kmcudaMemoryCopyError); CUCH(hipMemcpyToSymbol(d_yy_groups_size, &h_yy_groups_size, sizeof(h_yy_groups_size)), kmcudaMemoryCopyError); hipDeviceProp_t props; CUCH(hipGetDeviceProperties(&props, dev), kmcudaRuntimeError); int h_shmem_size = static_cast<int>(props.sharedMemPerBlock); DEBUG("GPU #%" PRIu32 " has %d bytes of shared memory per block\n", dev, h_shmem_size); h_shmem_size /= sizeof(uint32_t); CUCH(hipMemcpyToSymbol(d_shmem_size, &h_shmem_size, sizeof(h_shmem_size)), kmcudaMemoryCopyError); ); return kmcudaSuccess; } KMCUDAResult kmeans_cuda_plus_plus( uint32_t h_samples_size, uint32_t h_features_size, uint32_t cc, KMCUDADistanceMetric metric, const std::vector<int> &devs, int fp16x2, int verbosity, const udevptrs<float> &samples, udevptrs<float> *centroids, udevptrs<float> *dists, float *host_dists, atomic_float *dist_sum) { auto plan = distribute(h_samples_size, h_features_size * sizeof(float), devs); uint32_t max_len = 0; for (auto &p : plan) { auto len = std::get<1>(p); if (max_len < len) { max_len = len; } } udevptrs<atomic_float> dev_dists; CUMALLOC(dev_dists, sizeof(atomic_float)); CUMEMSET_ASYNC(dev_dists, 0, sizeof(atomic_float)); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, 
length) = plan[devi]; if (length == 0) { continue; } dim3 block(BS_KMPP, 1, 1); dim3 grid(upper(length, block.x), 1, 1); hipLaunchKernelGGL(( KERNEL_SWITCH(kmeans_plus_plus), , dim3(grid), dim3(block), 0, 0, offset, length, cc, reinterpret_cast<const F*>(samples[devi].get()), reinterpret_cast<const F*>((*centroids)[devi].get()), (*dists)[devi].get(), dev_dists[devi].get())); ); uint32_t dist_offset = 0; FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plan[devi]; dim3 block(BS_KMPP, 1, 1); dim3 grid(upper(length, block.x), 1, 1); CUCH(hipMemcpyAsync( host_dists + offset, (*dists)[devi].get(), length * sizeof(float), hipMemcpyDeviceToHost), kmcudaMemoryCopyError); dist_offset += grid.x; ); atomic_float sum = 0; FOR_EACH_DEVI( if (std::get<1>(plan[devi]) == 0) { continue; } atomic_float hdist; CUCH(hipMemcpy(&hdist, dev_dists[devi].get(), sizeof(atomic_float), hipMemcpyDeviceToHost), kmcudaMemoryCopyError); sum += hdist; ); *dist_sum = sum; return kmcudaSuccess; } KMCUDAResult kmeans_cuda_afkmc2_calc_q( uint32_t h_samples_size, uint32_t h_features_size, uint32_t firstc, KMCUDADistanceMetric metric, const std::vector<int> &devs, int fp16x2, int verbosity, const udevptrs<float> &samples, udevptrs<float> *d_q, float *h_q) { auto plan = distribute(h_samples_size, h_features_size * sizeof(float), devs); udevptrs<atomic_float> dev_dists; CUMALLOC(dev_dists, sizeof(atomic_float)); CUMEMSET_ASYNC(dev_dists, 0, sizeof(atomic_float)); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plan[devi]; if (length == 0) { continue; } dim3 block(BS_AFKMC2_Q, 1, 1); dim3 grid(upper(length, block.x), 1, 1); int shmem = ::max( BS_AFKMC2_Q, static_cast<int>(h_features_size)) * sizeof(float); hipLaunchKernelGGL(( KERNEL_SWITCH(kmeans_afkmc2_calc_q_dists), , dim3(grid), dim3(block), shmem, 0, offset, length, firstc, reinterpret_cast<const F*>(samples[devi].get()), (*d_q)[devi].get(), dev_dists[devi].get())); ); atomic_float dists_sum = 0; FOR_EACH_DEVI( if (std::get<1>(plan[devi]) == 0) { continue; } atomic_float hdist; CUCH(hipMemcpy(&hdist, dev_dists[devi].get(), sizeof(atomic_float), hipMemcpyDeviceToHost), kmcudaMemoryCopyError); dists_sum += hdist; ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plan[devi]; if (length == 0) { continue; } dim3 block(BLOCK_SIZE, 1, 1); dim3 grid(upper(length, block.x), 1, 1); hipLaunchKernelGGL(( kmeans_afkmc2_calc_q), dim3(grid), dim3(block), 0, 0, offset, length, dists_sum, (*d_q)[devi].get()); ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plan[devi]; CUCH(hipMemcpyAsync(h_q + offset, (*d_q)[devi].get() + offset, length * sizeof(float), hipMemcpyDeviceToHost), kmcudaMemoryCopyError); FOR_OTHER_DEVS( CUP2P(d_q, offset, length); ); ); SYNC_ALL_DEVS; return kmcudaSuccess; } KMCUDAResult kmeans_cuda_afkmc2_random_step( uint32_t k, uint32_t m, uint64_t seed, int verbosity, const float *q, uint32_t *d_choices, uint32_t *h_choices, float *d_samples, float *h_samples) { dim3 block(BS_AFKMC2_R, 1, 1); dim3 grid(upper(m, block.x), 1, 1); hipLaunchKernelGGL(( kmeans_afkmc2_random_step), dim3(grid), dim3(block), 0, 0, m, seed, k, q, d_choices, d_samples); CUCH(hipMemcpy(h_choices, d_choices, m * sizeof(uint32_t), hipMemcpyDeviceToHost), kmcudaMemoryCopyError); CUCH(hipMemcpy(h_samples, d_samples, m * sizeof(float), hipMemcpyDeviceToHost), kmcudaMemoryCopyError); return kmcudaSuccess; } KMCUDAResult kmeans_cuda_afkmc2_min_dist( uint32_t k, uint32_t m, KMCUDADistanceMetric metric, int fp16x2, int32_t verbosity, 
const float *samples, const uint32_t *choices, const float *centroids, float *d_min_dists, float *h_min_dists) { if (m > k || m > SHMEM_AFKMC2_MT) { dim3 block(BLOCK_SIZE, 1, 1); dim3 grid(upper(m, block.x), 1, 1); hipLaunchKernelGGL(( KERNEL_SWITCH(kmeans_afkmc2_min_dist), , dim3(grid), dim3(block), 0, 0, m, k, reinterpret_cast<const F*>(samples), choices, reinterpret_cast<const F*>(centroids), d_min_dists)); } else { dim3 block(BS_AFKMC2_MDT, 1, 1); dim3 grid(upper(k, block.x), 1, 1); CUCH(hipMemsetAsync(d_min_dists, 0xff, m * sizeof(float)), kmcudaRuntimeError); hipLaunchKernelGGL(( KERNEL_SWITCH(kmeans_afkmc2_min_dist_transposed), , dim3(grid), dim3(block), m * sizeof(float), 0, m, k, reinterpret_cast<const F*>(samples), choices, reinterpret_cast<const F*>(centroids), d_min_dists)); } CUCH(hipMemcpy(h_min_dists, d_min_dists, m * sizeof(float), hipMemcpyDeviceToHost), kmcudaMemoryCopyError); return kmcudaSuccess; } KMCUDAResult kmeans_cuda_lloyd( float tolerance, uint32_t h_samples_size, uint32_t h_clusters_size, uint16_t h_features_size, KMCUDADistanceMetric metric, bool resume, const std::vector<int> &devs, int fp16x2, int32_t verbosity, const udevptrs<float> &samples, udevptrs<float> *centroids, udevptrs<uint32_t> *ccounts, udevptrs<uint32_t> *assignments_prev, udevptrs<uint32_t> *assignments, int *iterations = nullptr) { std::vector<uint32_t> shmem_sizes; RETERR(prepare_mem(h_samples_size, h_clusters_size, resume, devs, verbosity, ccounts, assignments, assignments_prev, &shmem_sizes)); auto plans = distribute(h_samples_size, h_features_size * sizeof(float), devs); auto planc = distribute(h_clusters_size, h_features_size * sizeof(float), devs); if (verbosity > 1) { print_plan("plans", plans); print_plan("planc", planc); } dim3 sblock(BS_LL_ASS, 1, 1); dim3 cblock(BS_LL_CNT, 1, 1); for (int iter = 1; ; iter++) { if (!resume || iter > 1) { FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plans[devi]; if (length == 0) { continue; } dim3 sgrid(upper(length, sblock.x), 1, 1); int shmem_size = shmem_sizes[devi]; int64_t ssqrmem = sblock.x * h_features_size * sizeof(float); if (shmem_size > ssqrmem && shmem_size - ssqrmem >= static_cast<int>((h_features_size + 1) * sizeof(float))) { hipLaunchKernelGGL(( KERNEL_SWITCH(kmeans_assign_lloyd_smallc), , dim3(sgrid), dim3(sblock), shmem_size, 0, offset, length, reinterpret_cast<const F*>(samples[devi].get()), reinterpret_cast<const F*>((*centroids)[devi].get()), (*assignments_prev)[devi].get() + offset, (*assignments)[devi].get() + offset)); } else { hipLaunchKernelGGL(( KERNEL_SWITCH(kmeans_assign_lloyd), , dim3(sgrid), dim3(sblock), shmem_size, 0, offset, length, reinterpret_cast<const F*>(samples[devi].get()), reinterpret_cast<const F*>((*centroids)[devi].get()), (*assignments_prev)[devi].get() + offset, (*assignments)[devi].get() + offset)); } ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plans[devi]; if (length == 0) { continue; } FOR_OTHER_DEVS( CUP2P(assignments_prev, offset, length); CUP2P(assignments, offset, length); ); ); int status = check_changed(iter, tolerance, h_samples_size, devs, verbosity); if (status < kmcudaSuccess) { if (iterations) { *iterations = iter; } return kmcudaSuccess; } if (status != kmcudaSuccess) { return static_cast<KMCUDAResult>(status); } } FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = planc[devi]; if (length == 0) { continue; } dim3 cgrid(upper(length, cblock.x), 1, 1); hipLaunchKernelGGL(( KERNEL_SWITCH(kmeans_adjust), , dim3(cgrid), dim3(cblock), 
shmem_sizes[devi], 0, offset, length, reinterpret_cast<const F*>(samples[devi].get()), (*assignments_prev)[devi].get(), (*assignments)[devi].get(), reinterpret_cast<F*>((*centroids)[devi].get()), (*ccounts)[devi].get())); ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = planc[devi]; if (length == 0) { continue; } FOR_OTHER_DEVS( CUP2P(ccounts, offset, length); CUP2P(centroids, offset * h_features_size, length * h_features_size); ); ); } } KMCUDAResult kmeans_cuda_yy( float tolerance, uint32_t h_yy_groups_size, uint32_t h_samples_size, uint32_t h_clusters_size, uint16_t h_features_size, KMCUDADistanceMetric metric, const std::vector<int> &devs, int fp16x2, int32_t verbosity, const udevptrs<float> &samples, udevptrs<float> *centroids, udevptrs<uint32_t> *ccounts, udevptrs<uint32_t> *assignments_prev, udevptrs<uint32_t> *assignments, udevptrs<uint32_t> *assignments_yy, udevptrs<float> *centroids_yy, udevptrs<float> *bounds_yy, udevptrs<float> *drifts_yy, udevptrs<uint32_t> *passed_yy) { if (h_yy_groups_size == 0 || YINYANG_DRAFT_REASSIGNMENTS <= tolerance) { if (verbosity > 0) { if (h_yy_groups_size == 0) { #ifndef R_DEBUG printf("too few clusters for this yinyang_t => Lloyd\n"); #endif } else { #ifndef R_DEBUG printf("tolerance is too high (>= %.2f) => Lloyd\n", YINYANG_DRAFT_REASSIGNMENTS); #endif } } return kmeans_cuda_lloyd( tolerance, h_samples_size, h_clusters_size, h_features_size, metric, false, devs, fp16x2, verbosity, samples, centroids, ccounts, assignments_prev, assignments); } INFO("running Lloyd until reassignments drop below %" PRIu32 "\n", (uint32_t)(YINYANG_DRAFT_REASSIGNMENTS * h_samples_size)); int iter; RETERR(kmeans_cuda_lloyd( YINYANG_DRAFT_REASSIGNMENTS, h_samples_size, h_clusters_size, h_features_size, metric, false, devs, fp16x2, verbosity, samples, centroids, ccounts, assignments_prev, assignments, &iter)); if (check_changed(iter, tolerance, h_samples_size, devs, 0) < kmcudaSuccess) { return kmcudaSuccess; } // map each centroid to yinyang group -> assignments_yy FOR_EACH_DEV( CUCH(hipMemcpyToSymbol(d_samples_size, &h_clusters_size, sizeof(h_samples_size)), kmcudaMemoryCopyError); CUCH(hipMemcpyToSymbol(d_clusters_size, &h_yy_groups_size, sizeof(h_yy_groups_size)), kmcudaMemoryCopyError); ); { udevptrs<float> tmpbufs, tmpbufs2; auto max_slength = max_distribute_length( h_samples_size, h_features_size * sizeof(float), devs); for (auto &pyy : *passed_yy) { // max_slength is guaranteed to be greater than or equal to // h_clusters_size + h_yy_groups_size tmpbufs.emplace_back(reinterpret_cast<float*>(pyy.get()) + max_slength - h_clusters_size - h_yy_groups_size, true); tmpbufs2.emplace_back(tmpbufs.back().get() + h_clusters_size, true); } RETERR(cuda_transpose( h_clusters_size, h_features_size, true, devs, verbosity, centroids)); RETERR(kmeans_init_centroids( kmcudaInitMethodPlusPlus, nullptr, h_clusters_size, h_features_size, h_yy_groups_size, metric, 0, devs, -1, fp16x2, verbosity, nullptr, *centroids, &tmpbufs, nullptr, centroids_yy), INFO("kmeans_init_centroids() failed for yinyang groups: %s\n", hipGetErrorString(hipGetLastError()))); RETERR(kmeans_cuda_lloyd( YINYANG_GROUP_TOLERANCE, h_clusters_size, h_yy_groups_size, h_features_size, metric, false, devs, fp16x2, verbosity, *centroids, centroids_yy, reinterpret_cast<udevptrs<uint32_t> *>(&tmpbufs2), reinterpret_cast<udevptrs<uint32_t> *>(&tmpbufs), assignments_yy)); RETERR(cuda_transpose( h_clusters_size, h_features_size, false, devs, verbosity, centroids)); } FOR_EACH_DEV( 
CUCH(hipMemcpyToSymbol(d_samples_size, &h_samples_size, sizeof(h_samples_size)), kmcudaMemoryCopyError); CUCH(hipMemcpyToSymbol(d_clusters_size, &h_clusters_size, sizeof(h_clusters_size)), kmcudaMemoryCopyError); ); std::vector<uint32_t> shmem_sizes; RETERR(prepare_mem(h_samples_size, h_clusters_size, true, devs, verbosity, ccounts, assignments, assignments_prev, &shmem_sizes)); dim3 siblock(BS_YY_INI, 1, 1); dim3 sgblock(BS_YY_GFL, 1, 1); dim3 slblock(BS_YY_LFL, 1, 1); dim3 cblock(BS_LL_CNT, 1, 1); dim3 gblock(BLOCK_SIZE, 1, 1); auto plans = distribute(h_samples_size, h_features_size * sizeof(float), devs); auto planc = distribute(h_clusters_size, h_features_size * sizeof(float), devs); auto plang = distribute(h_yy_groups_size, h_features_size * sizeof(float), devs); if (verbosity > 1) { print_plan("plans", plans); print_plan("planc", planc); print_plan("plang", plang); } bool refresh = true; uint32_t h_passed_number = 0; for (; ; iter++) { if (!refresh) { int status = check_changed(iter, tolerance, h_samples_size, devs, verbosity); if (status < kmcudaSuccess) { return kmcudaSuccess; } if (status != kmcudaSuccess) { return static_cast<KMCUDAResult>(status); } FOR_EACH_DEV( uint32_t local_passed; CUCH(hipMemcpyFromSymbol(&local_passed, d_passed_number, sizeof(h_passed_number)), kmcudaMemoryCopyError); h_passed_number += local_passed; ); DEBUG("passed number: %" PRIu32 "\n", h_passed_number); if (1.f - (h_passed_number + 0.f) / h_samples_size < YINYANG_REFRESH_EPSILON) { refresh = true; } h_passed_number = 0; } if (refresh) { INFO("refreshing Yinyang bounds...\n"); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plans[devi]; if (length == 0) { continue; } dim3 sigrid(upper(length, siblock.x), 1, 1); hipLaunchKernelGGL(( KERNEL_SWITCH(kmeans_yy_init), , dim3(sigrid), dim3(siblock), shmem_sizes[devi], 0, offset, length, reinterpret_cast<const F*>(samples[devi].get()), reinterpret_cast<const F*>((*centroids)[devi].get()), (*assignments)[devi].get() + offset, (*assignments_yy)[devi].get(), (*bounds_yy)[devi].get())); ); refresh = false; } CUMEMCPY_D2D_ASYNC(*drifts_yy, 0, *centroids, 0, h_clusters_size * h_features_size); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = planc[devi]; if (length == 0) { continue; } dim3 cgrid(upper(length, cblock.x), 1, 1); hipLaunchKernelGGL(( KERNEL_SWITCH(kmeans_adjust), , dim3(cgrid), dim3(cblock), shmem_sizes[devi], 0, offset, length, reinterpret_cast<const F*>(samples[devi].get()), (*assignments_prev)[devi].get(), (*assignments)[devi].get(), reinterpret_cast<F*>((*centroids)[devi].get()), (*ccounts)[devi].get())); ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = planc[devi]; if (length == 0) { continue; } FOR_OTHER_DEVS( CUP2P(ccounts, offset, length); CUP2P(centroids, offset * h_features_size, length * h_features_size); ); ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = planc[devi]; if (length == 0) { continue; } dim3 cgrid(upper(length, cblock.x), 1, 1); hipLaunchKernelGGL(( KERNEL_SWITCH(kmeans_yy_calc_drifts), , dim3(cgrid), dim3(cblock), 0, 0, offset, length, reinterpret_cast<const F*>((*centroids)[devi].get()), reinterpret_cast<F*>((*drifts_yy)[devi].get()))); ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = planc[devi]; if (length == 0) { continue; } FOR_OTHER_DEVS( CUP2P(drifts_yy, h_clusters_size * h_features_size + offset, length); ); ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plang[devi]; if (length == 0) { continue; } 
dim3 ggrid(upper(length, gblock.x), 1, 1); hipLaunchKernelGGL(( kmeans_yy_find_group_max_drifts), dim3(ggrid), dim3(gblock), shmem_sizes[devi], 0, offset, length, (*assignments_yy)[devi].get(), (*drifts_yy)[devi].get()); ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plang[devi]; if (length == 0) { continue; } FOR_OTHER_DEVS( CUP2P(drifts_yy, offset, length); ); ); FOR_EACH_DEV( CUCH(hipMemcpyToSymbolAsync(d_passed_number, &h_passed_number, sizeof(h_passed_number)), kmcudaMemoryCopyError); ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plans[devi]; if (length == 0) { continue; } dim3 sggrid(upper(length, sgblock.x), 1, 1); hipLaunchKernelGGL(( KERNEL_SWITCH(kmeans_yy_global_filter), , dim3(sggrid), dim3(sgblock), 0, 0, offset, length, reinterpret_cast<const F*>(samples[devi].get()), reinterpret_cast<const F*>((*centroids)[devi].get()), (*assignments_yy)[devi].get(), (*drifts_yy)[devi].get(), (*assignments)[devi].get() + offset, (*assignments_prev)[devi].get() + offset, (*bounds_yy)[devi].get(), (*passed_yy)[devi].get())); dim3 slgrid(upper(length, slblock.x), 1, 1); hipLaunchKernelGGL(( KERNEL_SWITCH(kmeans_yy_local_filter), , dim3(slgrid), dim3(slblock), shmem_sizes[devi], 0, offset, length, reinterpret_cast<const F*>(samples[devi].get()), (*passed_yy)[devi].get(), reinterpret_cast<const F*>((*centroids)[devi].get()), (*assignments_yy)[devi].get(), (*drifts_yy)[devi].get(), (*assignments)[devi].get() + offset, (*bounds_yy)[devi].get())); ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plans[devi]; if (length == 0) { continue; } FOR_OTHER_DEVS( CUP2P(assignments_prev, offset, length); CUP2P(assignments, offset, length); ); ); } } KMCUDAResult kmeans_cuda_calc_average_distance( uint32_t h_samples_size, uint16_t h_features_size, KMCUDADistanceMetric metric, const std::vector<int> &devs, int fp16x2, int32_t verbosity, const udevptrs<float> &samples, const udevptrs<float> &centroids, const udevptrs<uint32_t> &assignments, float *average_distance) { INFO("calculating the average distance...\n"); auto plans = distribute(h_samples_size, h_features_size * sizeof(float), devs); udevptrs<atomic_float> dev_dists; CUMALLOC(dev_dists, sizeof(atomic_float)); CUMEMSET_ASYNC(dev_dists, 0, sizeof(atomic_float)); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plans[devi]; if (length == 0) { continue; } dim3 block(BLOCK_SIZE, 1, 1); dim3 grid(upper(length, block.x), 1, 1); hipLaunchKernelGGL(( KERNEL_SWITCH(kmeans_calc_average_distance), , dim3(grid), dim3(block), block.x * sizeof(float), 0, offset, length, reinterpret_cast<const F*>(samples[devi].get()), reinterpret_cast<const F*>(centroids[devi].get()), assignments[devi].get(), dev_dists[devi].get())); ); atomic_float sum = 0; FOR_EACH_DEVI( atomic_float hdist; CUCH(hipMemcpy(&hdist, dev_dists[devi].get(), sizeof(atomic_float), hipMemcpyDeviceToHost), kmcudaMemoryCopyError); sum += hdist; ); *average_distance = sum / h_samples_size; return kmcudaSuccess; } } // extern "C"
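A side note on the accumulation pattern above: kmeans_adjust and the Lloyd assignment kernels carry a compensation term `corr` through their dot-product loops, and the AFK-MC2 random-step kernel labels the same trick "Kahan summation with inverted c". The following is a minimal host-side sketch of that pattern, not kmcuda code; the function and variable names are illustrative.

#include <cstdio>
#include <vector>

// Compensated (Kahan) summation in the "inverted c" form used by the kernels:
// corr holds the rounding error of the previous addition and is folded back
// into the next term before that term is added to the running sum.
static float compensated_sum(const std::vector<float> &xs) {
  float sum = 0.f, corr = 0.f;
  for (float x : xs) {
    float y = x + corr;        // re-inject the error lost in the last step
    float t = sum + y;
    corr = y - (t - sum);      // the part of y that did not make it into t
    sum = t;
  }
  return sum;
}

int main() {
  std::vector<float> xs(10 * 1000 * 1000, 0.1f);
  float naive = 0.f;
  for (float x : xs) naive += x;
  std::printf("naive: %f  compensated: %f  exact: %f\n",
              naive, compensated_sum(xs), 0.1 * xs.size());
  return 0;
}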
b70785ffb107c91fd0ecbdf2249b9220d740e7b1.cu
#include <cassert> #include <cstdio> #include <cfloat> #include <cinttypes> #include <algorithm> #include <memory> #include <curand_kernel.h> #include "private.h" #include "metric_abstraction.h" #include "tricks.cuh" #define BS_KMPP 1024 #define BS_AFKMC2_Q 512 #define BS_AFKMC2_R 512 #define BS_AFKMC2_MDT 512 #define BS_LL_ASS 128 #define BS_LL_CNT 256 #define BS_YY_INI 128 #define BS_YY_GFL 512 #define BS_YY_LFL 512 #define BLOCK_SIZE 1024 // for all the rest of the kernels #define SHMEM_AFKMC2_RC 8191 // in float-s, the actual value is +1 #define SHMEM_AFKMC2_MT 8192 #define YINYANG_GROUP_TOLERANCE 0.02 #define YINYANG_DRAFT_REASSIGNMENTS 0.11 #define YINYANG_REFRESH_EPSILON 1e-4 __device__ uint32_t d_changed_number; __device__ uint32_t d_passed_number; __constant__ uint32_t d_samples_size; __constant__ uint32_t d_clusters_size; __constant__ uint32_t d_yy_groups_size; __constant__ int d_shmem_size; //////////////////////---------------------------------------------------------- // Device functions //---------------------------------------------------------- //////////////////////---------------------------------------------------------- template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_plus_plus( const uint32_t offset, const uint32_t length, const uint32_t cc, const F *__restrict__ samples, const F *__restrict__ centroids, float *__restrict__ dists, atomic_float *__restrict__ dists_sum) { uint32_t sample = blockIdx.x * blockDim.x + threadIdx.x; float dist = 0; if (sample < length) { centroids += (cc - 1) * d_features_size; const uint32_t local_sample = sample + offset; if (_eq(samples[local_sample], samples[local_sample])) { dist = METRIC<M, F>::distance_t( samples, centroids, d_samples_size, local_sample); } float prev_dist; if (cc == 1 || dist < (prev_dist = dists[sample])) { dists[sample] = dist; } else { dist = prev_dist; } } dist = warpReduceSum(dist); if (threadIdx.x % 32 == 0) { atomicAdd(dists_sum, dist); } } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_afkmc2_calc_q_dists( const uint32_t offset, const uint32_t length, uint32_t c1_index, const F *__restrict__ samples, float *__restrict__ dists, atomic_float *__restrict__ dsum) { volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x; float dist = 0; if (sample < length) { sample += offset; extern __shared__ float shmem_afkmc2[]; auto c1 = reinterpret_cast<F*>(shmem_afkmc2); uint16_t size_each = dupper(d_features_size, static_cast<uint16_t>(blockDim.x)); for (uint16_t i = size_each * threadIdx.x; i < min(size_each * (threadIdx.x + 1), d_features_size); i++) { c1[i] = samples[static_cast<uint64_t>(c1_index) * d_features_size + i]; } __syncthreads(); dist = METRIC<M, F>::distance_t(samples, c1, d_samples_size, sample); dist *= dist; dists[sample] = dist; } float sum = warpReduceSum(dist); if (threadIdx.x % 32 == 0) { atomicAdd(dsum, sum); } } __global__ void kmeans_afkmc2_calc_q( const uint32_t offset, const uint32_t length, float dsum, float *__restrict__ q) { volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x; if (sample >= length) { return; } sample += offset; q[sample] = 1 / (2.f * d_samples_size) + q[sample] / (2 * dsum); } __global__ void kmeans_afkmc2_random_step( const uint32_t m, const uint64_t seed, const uint64_t seq, const float *__restrict__ q, uint32_t *__restrict__ choices, float *__restrict__ samples) { volatile uint32_t ti = blockIdx.x * blockDim.x + threadIdx.x; curandState_t state; curand_init(seed, ti, seq, &state); float part = 
curand_uniform(&state); if (ti < m) { samples[ti] = curand_uniform(&state); } float accum = 0, corr = 0; bool found = false; __shared__ float shared_q[SHMEM_AFKMC2_RC + 1]; int32_t *all_found = reinterpret_cast<int32_t*>(shared_q + SHMEM_AFKMC2_RC); *all_found = blockDim.x; const uint32_t size_each = dupper( static_cast<unsigned>(SHMEM_AFKMC2_RC), blockDim.x); for (uint32_t sample = 0; sample < d_samples_size; sample += SHMEM_AFKMC2_RC) { __syncthreads(); if (*all_found == 0) { return; } for (uint32_t i = 0, si = threadIdx.x * size_each; i < size_each && (si = threadIdx.x * size_each + i) < SHMEM_AFKMC2_RC && (sample + si) < d_samples_size; i++) { shared_q[si] = q[sample + si]; } __syncthreads(); if (!found) { int i = 0; #pragma unroll 4 for (; i < SHMEM_AFKMC2_RC && accum < part && sample + i < d_samples_size; i++) { // Kahan summation with inverted c float y = _add(corr, shared_q[i]); float t = accum + y; corr = y - (t - accum); accum = t; } if (accum >= part) { if (ti < m) { choices[ti] = sample + i - 1; } found = true; atomicSub(all_found, 1); } } } } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_afkmc2_min_dist( const uint32_t m, const uint32_t k, const F *__restrict__ samples, const uint32_t *__restrict__ choices, const F *__restrict__ centroids, float *__restrict__ min_dists) { uint32_t chi = blockIdx.x * blockDim.x + threadIdx.x; if (chi >= m) { return; } float min_dist = FLT_MAX; for (uint32_t c = 0; c < k; c++) { float dist = METRIC<M, F>::distance_t( samples, centroids + c * d_features_size, d_samples_size, choices[chi]); if (dist < min_dist) { min_dist = dist; } } min_dists[chi] = min_dist * min_dist; } // min_dists must be set to FLT_MAX or +inf or NAN! template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_afkmc2_min_dist_transposed( const uint32_t m, const uint32_t k, const F *__restrict__ samples, const uint32_t *__restrict__ choices, const F *__restrict__ centroids, float *__restrict__ min_dists) { uint32_t c = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ float shared_min_dists[]; uint32_t size_each = dupper(m, blockDim.x); for (uint32_t i = size_each * threadIdx.x; i < min(size_each * (threadIdx.x + 1), m); i++) { shared_min_dists[i] = FLT_MAX; } __syncthreads(); for (uint32_t chi = 0; chi < m; chi++) { float dist = FLT_MAX; if (c < k) { dist = METRIC<M, F>::distance_t( samples, centroids + c * d_features_size, d_samples_size, choices[chi]); } float warp_min = warpReduceMin(dist); warp_min *= warp_min; if (threadIdx.x % 32 == 0 && c < k) { atomicMin(shared_min_dists + chi, warp_min); } } __syncthreads(); if (threadIdx.x == 0) { for (uint32_t chi = 0; chi < m; chi++) { atomicMin(min_dists + chi, shared_min_dists[chi]); } } } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_assign_lloyd_smallc( const uint32_t offset, const uint32_t length, const F *__restrict__ samples, const F *__restrict__ centroids, uint32_t *__restrict__ assignments_prev, uint32_t * __restrict__ assignments) { using HF = typename HALF<F>::type; uint32_t sample = blockIdx.x * blockDim.x + threadIdx.x; if (sample >= length) { return; } HF min_dist = _fmax<HF>(); uint32_t nearest = UINT32_MAX; extern __shared__ float _shared_samples[]; F *shared_samples = reinterpret_cast<F *>(_shared_samples); F *shared_centroids = shared_samples + blockDim.x * d_features_size; const uint32_t cstep = (d_shmem_size - blockDim.x * d_features_size) / (d_features_size + 1); F *csqrs = shared_centroids + cstep * d_features_size; const uint32_t size_each = 
cstep / min(blockDim.x, length - blockIdx.x * blockDim.x) + 1; const uint32_t local_sample = sample + offset; bool insane = _neq(samples[local_sample], samples[local_sample]); const uint32_t soffset = threadIdx.x * d_features_size; if (!insane) { for (uint64_t f = 0; f < d_features_size; f++) { shared_samples[soffset + f] = samples[f * d_samples_size + local_sample]; } } for (uint32_t gc = 0; gc < d_clusters_size; gc += cstep) { uint32_t coffset = gc * d_features_size; __syncthreads(); for (uint32_t i = 0; i < size_each; i++) { uint32_t ci = threadIdx.x * size_each + i; uint32_t local_offset = ci * d_features_size; uint32_t global_offset = coffset + local_offset; if (global_offset < d_clusters_size * d_features_size && ci < cstep) { csqrs[ci] = METRIC<M, F>::sum_squares( centroids + global_offset, shared_centroids + local_offset); } } __syncthreads(); if (insane) { continue; } for (uint32_t c = gc; c < gc + cstep && c < d_clusters_size; c++) { F product = _const<F>(0), corr = _const<F>(0); coffset = (c - gc) * d_features_size; #pragma unroll 4 for (int f = 0; f < d_features_size; f++) { F y = _fma(corr, shared_samples[soffset + f], shared_centroids[coffset + f]); F t = _add(product, y); corr = _sub(y, _sub(t, product)); product = t; } HF dist = METRIC<M, F>::distance(_const<F>(0), csqrs[c - gc], product); if (_lt(dist, min_dist)) { min_dist = dist; nearest = c; } } } if (nearest == UINT32_MAX) { if (!insane) { #ifndef R_DEBUG printf("CUDA kernel kmeans_assign: nearest neighbor search failed for " "sample %" PRIu32 "\n", sample); #endif return; } else { nearest = d_clusters_size; } } uint32_t ass = assignments[sample]; assignments_prev[sample] = ass; if (ass != nearest) { assignments[sample] = nearest; atomicAggInc(&d_changed_number); } } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_assign_lloyd( const uint32_t offset, const uint32_t length, const F *__restrict__ samples, const F *__restrict__ centroids, uint32_t *__restrict__ assignments_prev, uint32_t * __restrict__ assignments) { using HF = typename HALF<F>::type; uint32_t sample = blockIdx.x * blockDim.x + threadIdx.x; if (sample >= length) { return; } HF min_dist = _fmax<HF>(); uint32_t nearest = UINT32_MAX; extern __shared__ float _shared_centroids[]; F *shared_centroids = reinterpret_cast<F *>(_shared_centroids); const uint32_t cstep = d_shmem_size / (d_features_size + 1); F *csqrs = shared_centroids + cstep * d_features_size; const uint32_t size_each = cstep / min(blockDim.x, length - blockIdx.x * blockDim.x) + 1; const uint32_t local_sample = sample + offset; bool insane = _neq(samples[local_sample], samples[local_sample]); for (uint32_t gc = 0; gc < d_clusters_size; gc += cstep) { uint32_t coffset = gc * d_features_size; __syncthreads(); for (uint32_t i = 0; i < size_each; i++) { uint32_t ci = threadIdx.x * size_each + i; uint32_t local_offset = ci * d_features_size; uint32_t global_offset = coffset + local_offset; if (global_offset < d_clusters_size * d_features_size && ci < cstep) { csqrs[ci] = METRIC<M, F>::sum_squares( centroids + global_offset, shared_centroids + local_offset); } } __syncthreads(); if (insane) { continue; } for (uint32_t c = gc; c < gc + cstep && c < d_clusters_size; c++) { F product = _const<F>(0), corr = _const<F>(0); coffset = (c - gc) * d_features_size; #pragma unroll 4 for (uint64_t f = 0; f < d_features_size; f++) { F y = _fma(corr, samples[static_cast<uint64_t>(d_samples_size) * f + local_sample], shared_centroids[coffset + f]); F t = _add(product, y); corr = _sub(y, _sub(t, 
product)); product = t; } HF dist = METRIC<M, F>::distance(_const<F>(0), csqrs[c - gc], product); if (_lt(dist, min_dist)) { min_dist = dist; nearest = c; } } } if (nearest == UINT32_MAX) { if (!insane) { #ifndef R_DEBUG printf("CUDA kernel kmeans_assign: nearest neighbor search failed for " "sample %" PRIu32 "\n", sample); #endif return; } else { nearest = d_clusters_size; } } uint32_t ass = assignments[sample]; assignments_prev[sample] = ass; if (ass != nearest) { assignments[sample] = nearest; atomicAggInc(&d_changed_number); } } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_adjust( const uint32_t coffset, const uint32_t length, const F *__restrict__ samples, const uint32_t *__restrict__ assignments_prev, const uint32_t *__restrict__ assignments, F *__restrict__ centroids, uint32_t *__restrict__ ccounts) { uint32_t c = blockIdx.x * blockDim.x + threadIdx.x; if (c >= length) { return; } c += coffset; uint32_t my_count = ccounts[c]; { F fmy_count = _const<F>(my_count); centroids += c * d_features_size; for (int f = 0; f < d_features_size; f++) { centroids[f] = _mul(centroids[f], fmy_count); } } extern __shared__ uint32_t ass[]; int step = d_shmem_size / 2; F corr = _const<F>(0); for (uint32_t sbase = 0; sbase < d_samples_size; sbase += step) { __syncthreads(); if (threadIdx.x == 0) { int pos = sbase; for (int i = 0; i < step && sbase + i < d_samples_size; i++) { ass[2 * i] = assignments[pos + i]; ass[2 * i + 1] = assignments_prev[pos + i]; } } __syncthreads(); for (int i = 0; i < step && sbase + i < d_samples_size; i++) { uint32_t this_ass = ass[2 * i]; uint32_t prev_ass = ass[2 * i + 1]; int sign = 0; if (prev_ass == c && this_ass != c) { sign = -1; my_count--; } else if (prev_ass != c && this_ass == c) { sign = 1; my_count++; } if (sign != 0) { F fsign = _const<F>(sign); #pragma unroll 4 for (uint64_t f = 0; f < d_features_size; f++) { F centroid = centroids[f]; F y = _fma(corr, samples[static_cast<uint64_t>(d_samples_size) * f + sbase + i], fsign); F t = _add(centroid, y); corr = _sub(y, _sub(t, centroid)); centroids[f] = t; } } } } // my_count can be 0 => we get NaN with L2 and never use this cluster again // this is a feature, not a bug METRIC<M, F>::normalize(my_count, centroids); ccounts[c] = my_count; } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_yy_init( const uint32_t offset, const uint32_t length, const F *__restrict__ samples, const F *__restrict__ centroids, const uint32_t *__restrict__ assignments, const uint32_t *__restrict__ groups, float *__restrict__ volatile bounds) { volatile uint32_t sample = blockIdx.x * blockDim.x + threadIdx.x; if (sample >= length) { return; } for (uint32_t i = 0; i < d_yy_groups_size + 1; i++) { bounds[static_cast<uint64_t>(length) * i + sample] = FLT_MAX; } uint32_t nearest = assignments[sample]; extern __shared__ float shared_memory[]; F *volatile shared_centroids = reinterpret_cast<F*>(shared_memory); const uint32_t cstep = d_shmem_size / d_features_size; const uint32_t size_each = cstep / min(blockDim.x, length - blockIdx.x * blockDim.x) + 1; for (uint32_t gc = 0; gc < d_clusters_size; gc += cstep) { uint32_t coffset = gc * d_features_size; __syncthreads(); for (uint32_t i = 0; i < size_each; i++) { uint32_t ci = threadIdx.x * size_each + i; uint32_t local_offset = ci * d_features_size; uint32_t global_offset = coffset + local_offset; if (global_offset < d_clusters_size * d_features_size && ci < cstep) { #pragma unroll 4 for (int f = 0; f < d_features_size; f++) { shared_centroids[local_offset + 
f] = centroids[global_offset + f]; } } } __syncthreads(); for (uint32_t c = gc; c < gc + cstep && c < d_clusters_size; c++) { uint32_t group = groups[c]; if (group >= d_yy_groups_size) { // this may happen if the centroid is insane (NaN) continue; } float dist = METRIC<M, F>::distance_t( samples, shared_centroids + (c - gc) * d_features_size, d_samples_size, sample + offset); if (c != nearest) { uint64_t gindex = static_cast<uint64_t>(length) * (1 + group) + sample; if (dist < bounds[gindex]) { bounds[gindex] = dist; } } else { bounds[sample] = dist; } } } } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_yy_calc_drifts( const uint32_t offset, const uint32_t length, const F *__restrict__ centroids, F *__restrict__ drifts) { uint32_t c = blockIdx.x * blockDim.x + threadIdx.x; if (c >= length) { return; } c += offset; uint32_t coffset = c * d_features_size; (reinterpret_cast<float *>(drifts))[d_clusters_size * d_features_size + c] = METRIC<M, F>::distance(centroids + coffset, drifts + coffset); } __global__ void kmeans_yy_find_group_max_drifts( const uint32_t offset, const uint32_t length, const uint32_t *__restrict__ groups, float *__restrict__ drifts) { uint32_t group = blockIdx.x * blockDim.x + threadIdx.x; if (group >= length) { return; } group += offset; const uint32_t doffset = d_clusters_size * d_features_size; const uint32_t step = d_shmem_size / 2; const uint32_t size_each = d_shmem_size / (2 * min(blockDim.x, length - blockIdx.x * blockDim.x)); extern __shared__ uint32_t shmem[]; float *cd = (float *)shmem; uint32_t *cg = shmem + step; float my_max = -FLT_MAX; for (uint32_t offset = 0; offset < d_clusters_size; offset += step) { __syncthreads(); for (uint32_t i = 0; i < size_each; i++) { uint32_t local_offset = threadIdx.x * size_each + i; uint32_t global_offset = offset + local_offset; if (global_offset < d_clusters_size && local_offset < step) { cd[local_offset] = drifts[doffset + global_offset]; cg[local_offset] = groups[global_offset]; } } __syncthreads(); for (uint32_t i = 0; i < step; i++) { if (cg[i] == group) { float d = cd[i]; if (my_max < d) { my_max = d; } } } } drifts[group] = my_max; } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_yy_global_filter( const uint32_t offset, const uint32_t length, const F *__restrict__ samples, const F *__restrict__ centroids, const uint32_t *__restrict__ groups, const float *__restrict__ drifts, const uint32_t *__restrict__ assignments, uint32_t *__restrict__ assignments_prev, float *__restrict__ bounds, uint32_t *__restrict__ passed) { volatile uint32_t sample = blockIdx.x * blockDim.x + threadIdx.x; if (sample >= length) { return; } uint32_t cluster = assignments[sample]; assignments_prev[sample] = cluster; float upper_bound = bounds[sample]; uint32_t doffset = d_clusters_size * d_features_size; float cluster_drift = drifts[doffset + cluster]; upper_bound += cluster_drift; float min_lower_bound = FLT_MAX; for (uint32_t g = 0; g < d_yy_groups_size; g++) { uint64_t gindex = static_cast<uint64_t>(length) * (1 + g) + sample; float lower_bound = bounds[gindex] - drifts[g]; bounds[gindex] = lower_bound; if (lower_bound < min_lower_bound) { min_lower_bound = lower_bound; } } // group filter try #1 if (min_lower_bound >= upper_bound) { bounds[sample] = upper_bound; return; } upper_bound = 0; upper_bound = METRIC<M, F>::distance_t( samples, centroids + cluster * d_features_size, d_samples_size, sample + offset); bounds[sample] = upper_bound; // group filter try #2 if (min_lower_bound >= upper_bound) { 
return; } // d'oh! passed[atomicAggInc(&d_passed_number)] = sample; } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_yy_local_filter( const uint32_t offset, const uint32_t length, const F *__restrict__ samples, const uint32_t *__restrict__ passed, const F *__restrict__ centroids, const uint32_t *__restrict__ groups, const float *__restrict__ drifts, uint32_t *__restrict__ assignments, float *__restrict__ bounds) { volatile uint32_t sample = blockIdx.x * blockDim.x + threadIdx.x; if (sample >= d_passed_number) { return; } sample = passed[sample]; float upper_bound = bounds[sample]; uint32_t cluster = assignments[sample]; uint32_t doffset = d_clusters_size * d_features_size; float min_dist = upper_bound, second_min_dist = FLT_MAX; uint32_t nearest = cluster; extern __shared__ float shared_memory[]; F *volatile shared_centroids = reinterpret_cast<F*>(shared_memory); const uint32_t cstep = d_shmem_size / d_features_size; const uint32_t size_each = cstep / min(blockDim.x, d_passed_number - blockIdx.x * blockDim.x) + 1; for (uint32_t gc = 0; gc < d_clusters_size; gc += cstep) { uint32_t coffset = gc * d_features_size; __syncthreads(); for (uint32_t i = 0; i < size_each; i++) { uint32_t ci = threadIdx.x * size_each + i; uint32_t local_offset = ci * d_features_size; uint32_t global_offset = coffset + local_offset; if (global_offset < d_clusters_size * d_features_size && ci < cstep) { #pragma unroll 4 for (int f = 0; f < d_features_size; f++) { shared_centroids[local_offset + f] = centroids[global_offset + f]; } } } __syncthreads(); for (uint32_t c = gc; c < gc + cstep && c < d_clusters_size; c++) { if (c == cluster) { continue; } uint32_t group = groups[c]; if (group >= d_yy_groups_size) { // this may happen if the centroid is insane (NaN) continue; } float lower_bound = bounds[ static_cast<uint64_t>(length) * (1 + group) + sample]; if (lower_bound >= upper_bound) { if (lower_bound < second_min_dist) { second_min_dist = lower_bound; } continue; } lower_bound += drifts[group] - drifts[doffset + c]; if (second_min_dist < lower_bound) { continue; } float dist = METRIC<M, F>::distance_t( samples, shared_centroids + (c - gc) * d_features_size, d_samples_size, sample + offset); if (dist < min_dist) { second_min_dist = min_dist; min_dist = dist; nearest = c; } else if (dist < second_min_dist) { second_min_dist = dist; } } } uint32_t nearest_group = groups[nearest]; uint32_t previous_group = groups[cluster]; bounds[static_cast<uint64_t>(length) * (1 + nearest_group) + sample] = second_min_dist; if (nearest_group != previous_group) { uint64_t gindex = static_cast<uint64_t>(length) * (1 + previous_group) + sample; float pb = bounds[gindex]; if (pb > upper_bound) { bounds[gindex] = upper_bound; } } bounds[sample] = min_dist; if (cluster != nearest) { assignments[sample] = nearest; atomicAggInc(&d_changed_number); } } template <KMCUDADistanceMetric M, typename F> __global__ void kmeans_calc_average_distance( uint32_t offset, uint32_t length, const F *__restrict__ samples, const F *__restrict__ centroids, const uint32_t *__restrict__ assignments, atomic_float *distance) { volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x; float dist = 0; if (sample < length) { sample += offset; dist = METRIC<M, F>::distance_t( samples, centroids + assignments[sample] * d_features_size, d_samples_size, sample); } float sum = warpReduceSum(dist); if (threadIdx.x % 32 == 0) { atomicAdd(distance, sum); } } ////////////////////------------------------------------------------------------ // Host 
functions //------------------------------------------------------------ ////////////////////------------------------------------------------------------ static int check_changed(int iter, float tolerance, uint32_t h_samples_size, const std::vector<int> &devs, int32_t verbosity) { uint32_t overall_changed = 0; FOR_EACH_DEV( uint32_t my_changed = 0; CUCH(cudaMemcpyFromSymbol(&my_changed, d_changed_number, sizeof(my_changed)), kmcudaMemoryCopyError); overall_changed += my_changed; ); INFO("iteration %d: %" PRIu32 " reassignments\n", iter, overall_changed); if (overall_changed <= tolerance * h_samples_size) { return -1; } #ifndef R_DEBUG assert(overall_changed <= h_samples_size); #endif uint32_t zero = 0; FOR_EACH_DEV( CUCH(cudaMemcpyToSymbolAsync(d_changed_number, &zero, sizeof(zero)), kmcudaMemoryCopyError); ); return kmcudaSuccess; } static KMCUDAResult prepare_mem( uint32_t h_samples_size, uint32_t h_clusters_size, bool resume, const std::vector<int> &devs, int verbosity, udevptrs<uint32_t> *ccounts, udevptrs<uint32_t> *assignments, udevptrs<uint32_t> *assignments_prev, std::vector<uint32_t> *shmem_sizes) { uint32_t zero = 0; shmem_sizes->clear(); FOR_EACH_DEVI( uint32_t h_shmem_size; CUCH(cudaMemcpyFromSymbol(&h_shmem_size, d_shmem_size, sizeof(h_shmem_size)), kmcudaMemoryCopyError); shmem_sizes->push_back(h_shmem_size * sizeof(uint32_t)); CUCH(cudaMemcpyToSymbolAsync(d_changed_number, &zero, sizeof(zero)), kmcudaMemoryCopyError); if (!resume) { CUCH(cudaMemsetAsync((*ccounts)[devi].get(), 0, h_clusters_size * sizeof(uint32_t)), kmcudaRuntimeError); CUCH(cudaMemsetAsync((*assignments)[devi].get(), 0xff, h_samples_size * sizeof(uint32_t)), kmcudaRuntimeError); CUCH(cudaMemsetAsync((*assignments_prev)[devi].get(), 0xff, h_samples_size * sizeof(uint32_t)), kmcudaRuntimeError); } ); return kmcudaSuccess; } extern "C" { KMCUDAResult kmeans_cuda_setup( uint32_t h_samples_size, uint16_t h_features_size, uint32_t h_clusters_size, uint32_t h_yy_groups_size, const std::vector<int> &devs, int32_t verbosity) { FOR_EACH_DEV( CUCH(cudaMemcpyToSymbol(d_samples_size, &h_samples_size, sizeof(h_samples_size)), kmcudaMemoryCopyError); CUCH(cudaMemcpyToSymbol(d_features_size, &h_features_size, sizeof(h_features_size)), kmcudaMemoryCopyError); CUCH(cudaMemcpyToSymbol(d_clusters_size, &h_clusters_size, sizeof(h_clusters_size)), kmcudaMemoryCopyError); CUCH(cudaMemcpyToSymbol(d_yy_groups_size, &h_yy_groups_size, sizeof(h_yy_groups_size)), kmcudaMemoryCopyError); cudaDeviceProp props; CUCH(cudaGetDeviceProperties(&props, dev), kmcudaRuntimeError); int h_shmem_size = static_cast<int>(props.sharedMemPerBlock); DEBUG("GPU #%" PRIu32 " has %d bytes of shared memory per block\n", dev, h_shmem_size); h_shmem_size /= sizeof(uint32_t); CUCH(cudaMemcpyToSymbol(d_shmem_size, &h_shmem_size, sizeof(h_shmem_size)), kmcudaMemoryCopyError); ); return kmcudaSuccess; } KMCUDAResult kmeans_cuda_plus_plus( uint32_t h_samples_size, uint32_t h_features_size, uint32_t cc, KMCUDADistanceMetric metric, const std::vector<int> &devs, int fp16x2, int verbosity, const udevptrs<float> &samples, udevptrs<float> *centroids, udevptrs<float> *dists, float *host_dists, atomic_float *dist_sum) { auto plan = distribute(h_samples_size, h_features_size * sizeof(float), devs); uint32_t max_len = 0; for (auto &p : plan) { auto len = std::get<1>(p); if (max_len < len) { max_len = len; } } udevptrs<atomic_float> dev_dists; CUMALLOC(dev_dists, sizeof(atomic_float)); CUMEMSET_ASYNC(dev_dists, 0, sizeof(atomic_float)); FOR_EACH_DEVI( uint32_t offset, 
length; std::tie(offset, length) = plan[devi]; if (length == 0) { continue; } dim3 block(BS_KMPP, 1, 1); dim3 grid(upper(length, block.x), 1, 1); KERNEL_SWITCH(kmeans_plus_plus, <<<grid, block>>>( offset, length, cc, reinterpret_cast<const F*>(samples[devi].get()), reinterpret_cast<const F*>((*centroids)[devi].get()), (*dists)[devi].get(), dev_dists[devi].get())); ); uint32_t dist_offset = 0; FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plan[devi]; dim3 block(BS_KMPP, 1, 1); dim3 grid(upper(length, block.x), 1, 1); CUCH(cudaMemcpyAsync( host_dists + offset, (*dists)[devi].get(), length * sizeof(float), cudaMemcpyDeviceToHost), kmcudaMemoryCopyError); dist_offset += grid.x; ); atomic_float sum = 0; FOR_EACH_DEVI( if (std::get<1>(plan[devi]) == 0) { continue; } atomic_float hdist; CUCH(cudaMemcpy(&hdist, dev_dists[devi].get(), sizeof(atomic_float), cudaMemcpyDeviceToHost), kmcudaMemoryCopyError); sum += hdist; ); *dist_sum = sum; return kmcudaSuccess; } KMCUDAResult kmeans_cuda_afkmc2_calc_q( uint32_t h_samples_size, uint32_t h_features_size, uint32_t firstc, KMCUDADistanceMetric metric, const std::vector<int> &devs, int fp16x2, int verbosity, const udevptrs<float> &samples, udevptrs<float> *d_q, float *h_q) { auto plan = distribute(h_samples_size, h_features_size * sizeof(float), devs); udevptrs<atomic_float> dev_dists; CUMALLOC(dev_dists, sizeof(atomic_float)); CUMEMSET_ASYNC(dev_dists, 0, sizeof(atomic_float)); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plan[devi]; if (length == 0) { continue; } dim3 block(BS_AFKMC2_Q, 1, 1); dim3 grid(upper(length, block.x), 1, 1); int shmem = std::max( BS_AFKMC2_Q, static_cast<int>(h_features_size)) * sizeof(float); KERNEL_SWITCH(kmeans_afkmc2_calc_q_dists, <<<grid, block, shmem>>>( offset, length, firstc, reinterpret_cast<const F*>(samples[devi].get()), (*d_q)[devi].get(), dev_dists[devi].get())); ); atomic_float dists_sum = 0; FOR_EACH_DEVI( if (std::get<1>(plan[devi]) == 0) { continue; } atomic_float hdist; CUCH(cudaMemcpy(&hdist, dev_dists[devi].get(), sizeof(atomic_float), cudaMemcpyDeviceToHost), kmcudaMemoryCopyError); dists_sum += hdist; ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plan[devi]; if (length == 0) { continue; } dim3 block(BLOCK_SIZE, 1, 1); dim3 grid(upper(length, block.x), 1, 1); kmeans_afkmc2_calc_q<<<grid, block>>>( offset, length, dists_sum, (*d_q)[devi].get()); ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plan[devi]; CUCH(cudaMemcpyAsync(h_q + offset, (*d_q)[devi].get() + offset, length * sizeof(float), cudaMemcpyDeviceToHost), kmcudaMemoryCopyError); FOR_OTHER_DEVS( CUP2P(d_q, offset, length); ); ); SYNC_ALL_DEVS; return kmcudaSuccess; } KMCUDAResult kmeans_cuda_afkmc2_random_step( uint32_t k, uint32_t m, uint64_t seed, int verbosity, const float *q, uint32_t *d_choices, uint32_t *h_choices, float *d_samples, float *h_samples) { dim3 block(BS_AFKMC2_R, 1, 1); dim3 grid(upper(m, block.x), 1, 1); kmeans_afkmc2_random_step<<<grid, block>>>( m, seed, k, q, d_choices, d_samples); CUCH(cudaMemcpy(h_choices, d_choices, m * sizeof(uint32_t), cudaMemcpyDeviceToHost), kmcudaMemoryCopyError); CUCH(cudaMemcpy(h_samples, d_samples, m * sizeof(float), cudaMemcpyDeviceToHost), kmcudaMemoryCopyError); return kmcudaSuccess; } KMCUDAResult kmeans_cuda_afkmc2_min_dist( uint32_t k, uint32_t m, KMCUDADistanceMetric metric, int fp16x2, int32_t verbosity, const float *samples, const uint32_t *choices, const float *centroids, float *d_min_dists, float 
*h_min_dists) { if (m > k || m > SHMEM_AFKMC2_MT) { dim3 block(BLOCK_SIZE, 1, 1); dim3 grid(upper(m, block.x), 1, 1); KERNEL_SWITCH(kmeans_afkmc2_min_dist, <<<grid, block>>>( m, k, reinterpret_cast<const F*>(samples), choices, reinterpret_cast<const F*>(centroids), d_min_dists)); } else { dim3 block(BS_AFKMC2_MDT, 1, 1); dim3 grid(upper(k, block.x), 1, 1); CUCH(cudaMemsetAsync(d_min_dists, 0xff, m * sizeof(float)), kmcudaRuntimeError); KERNEL_SWITCH(kmeans_afkmc2_min_dist_transposed, <<<grid, block, m * sizeof(float)>>>( m, k, reinterpret_cast<const F*>(samples), choices, reinterpret_cast<const F*>(centroids), d_min_dists)); } CUCH(cudaMemcpy(h_min_dists, d_min_dists, m * sizeof(float), cudaMemcpyDeviceToHost), kmcudaMemoryCopyError); return kmcudaSuccess; } KMCUDAResult kmeans_cuda_lloyd( float tolerance, uint32_t h_samples_size, uint32_t h_clusters_size, uint16_t h_features_size, KMCUDADistanceMetric metric, bool resume, const std::vector<int> &devs, int fp16x2, int32_t verbosity, const udevptrs<float> &samples, udevptrs<float> *centroids, udevptrs<uint32_t> *ccounts, udevptrs<uint32_t> *assignments_prev, udevptrs<uint32_t> *assignments, int *iterations = nullptr) { std::vector<uint32_t> shmem_sizes; RETERR(prepare_mem(h_samples_size, h_clusters_size, resume, devs, verbosity, ccounts, assignments, assignments_prev, &shmem_sizes)); auto plans = distribute(h_samples_size, h_features_size * sizeof(float), devs); auto planc = distribute(h_clusters_size, h_features_size * sizeof(float), devs); if (verbosity > 1) { print_plan("plans", plans); print_plan("planc", planc); } dim3 sblock(BS_LL_ASS, 1, 1); dim3 cblock(BS_LL_CNT, 1, 1); for (int iter = 1; ; iter++) { if (!resume || iter > 1) { FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plans[devi]; if (length == 0) { continue; } dim3 sgrid(upper(length, sblock.x), 1, 1); int shmem_size = shmem_sizes[devi]; int64_t ssqrmem = sblock.x * h_features_size * sizeof(float); if (shmem_size > ssqrmem && shmem_size - ssqrmem >= static_cast<int>((h_features_size + 1) * sizeof(float))) { KERNEL_SWITCH(kmeans_assign_lloyd_smallc, <<<sgrid, sblock, shmem_size>>>( offset, length, reinterpret_cast<const F*>(samples[devi].get()), reinterpret_cast<const F*>((*centroids)[devi].get()), (*assignments_prev)[devi].get() + offset, (*assignments)[devi].get() + offset)); } else { KERNEL_SWITCH(kmeans_assign_lloyd, <<<sgrid, sblock, shmem_size>>>( offset, length, reinterpret_cast<const F*>(samples[devi].get()), reinterpret_cast<const F*>((*centroids)[devi].get()), (*assignments_prev)[devi].get() + offset, (*assignments)[devi].get() + offset)); } ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plans[devi]; if (length == 0) { continue; } FOR_OTHER_DEVS( CUP2P(assignments_prev, offset, length); CUP2P(assignments, offset, length); ); ); int status = check_changed(iter, tolerance, h_samples_size, devs, verbosity); if (status < kmcudaSuccess) { if (iterations) { *iterations = iter; } return kmcudaSuccess; } if (status != kmcudaSuccess) { return static_cast<KMCUDAResult>(status); } } FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = planc[devi]; if (length == 0) { continue; } dim3 cgrid(upper(length, cblock.x), 1, 1); KERNEL_SWITCH(kmeans_adjust, <<<cgrid, cblock, shmem_sizes[devi]>>>( offset, length, reinterpret_cast<const F*>(samples[devi].get()), (*assignments_prev)[devi].get(), (*assignments)[devi].get(), reinterpret_cast<F*>((*centroids)[devi].get()), (*ccounts)[devi].get())); ); FOR_EACH_DEVI( uint32_t offset, 
length; std::tie(offset, length) = planc[devi]; if (length == 0) { continue; } FOR_OTHER_DEVS( CUP2P(ccounts, offset, length); CUP2P(centroids, offset * h_features_size, length * h_features_size); ); ); } } KMCUDAResult kmeans_cuda_yy( float tolerance, uint32_t h_yy_groups_size, uint32_t h_samples_size, uint32_t h_clusters_size, uint16_t h_features_size, KMCUDADistanceMetric metric, const std::vector<int> &devs, int fp16x2, int32_t verbosity, const udevptrs<float> &samples, udevptrs<float> *centroids, udevptrs<uint32_t> *ccounts, udevptrs<uint32_t> *assignments_prev, udevptrs<uint32_t> *assignments, udevptrs<uint32_t> *assignments_yy, udevptrs<float> *centroids_yy, udevptrs<float> *bounds_yy, udevptrs<float> *drifts_yy, udevptrs<uint32_t> *passed_yy) { if (h_yy_groups_size == 0 || YINYANG_DRAFT_REASSIGNMENTS <= tolerance) { if (verbosity > 0) { if (h_yy_groups_size == 0) { #ifndef R_DEBUG printf("too few clusters for this yinyang_t => Lloyd\n"); #endif } else { #ifndef R_DEBUG printf("tolerance is too high (>= %.2f) => Lloyd\n", YINYANG_DRAFT_REASSIGNMENTS); #endif } } return kmeans_cuda_lloyd( tolerance, h_samples_size, h_clusters_size, h_features_size, metric, false, devs, fp16x2, verbosity, samples, centroids, ccounts, assignments_prev, assignments); } INFO("running Lloyd until reassignments drop below %" PRIu32 "\n", (uint32_t)(YINYANG_DRAFT_REASSIGNMENTS * h_samples_size)); int iter; RETERR(kmeans_cuda_lloyd( YINYANG_DRAFT_REASSIGNMENTS, h_samples_size, h_clusters_size, h_features_size, metric, false, devs, fp16x2, verbosity, samples, centroids, ccounts, assignments_prev, assignments, &iter)); if (check_changed(iter, tolerance, h_samples_size, devs, 0) < kmcudaSuccess) { return kmcudaSuccess; } // map each centroid to yinyang group -> assignments_yy FOR_EACH_DEV( CUCH(cudaMemcpyToSymbol(d_samples_size, &h_clusters_size, sizeof(h_samples_size)), kmcudaMemoryCopyError); CUCH(cudaMemcpyToSymbol(d_clusters_size, &h_yy_groups_size, sizeof(h_yy_groups_size)), kmcudaMemoryCopyError); ); { udevptrs<float> tmpbufs, tmpbufs2; auto max_slength = max_distribute_length( h_samples_size, h_features_size * sizeof(float), devs); for (auto &pyy : *passed_yy) { // max_slength is guaranteed to be greater than or equal to // h_clusters_size + h_yy_groups_size tmpbufs.emplace_back(reinterpret_cast<float*>(pyy.get()) + max_slength - h_clusters_size - h_yy_groups_size, true); tmpbufs2.emplace_back(tmpbufs.back().get() + h_clusters_size, true); } RETERR(cuda_transpose( h_clusters_size, h_features_size, true, devs, verbosity, centroids)); RETERR(kmeans_init_centroids( kmcudaInitMethodPlusPlus, nullptr, h_clusters_size, h_features_size, h_yy_groups_size, metric, 0, devs, -1, fp16x2, verbosity, nullptr, *centroids, &tmpbufs, nullptr, centroids_yy), INFO("kmeans_init_centroids() failed for yinyang groups: %s\n", cudaGetErrorString(cudaGetLastError()))); RETERR(kmeans_cuda_lloyd( YINYANG_GROUP_TOLERANCE, h_clusters_size, h_yy_groups_size, h_features_size, metric, false, devs, fp16x2, verbosity, *centroids, centroids_yy, reinterpret_cast<udevptrs<uint32_t> *>(&tmpbufs2), reinterpret_cast<udevptrs<uint32_t> *>(&tmpbufs), assignments_yy)); RETERR(cuda_transpose( h_clusters_size, h_features_size, false, devs, verbosity, centroids)); } FOR_EACH_DEV( CUCH(cudaMemcpyToSymbol(d_samples_size, &h_samples_size, sizeof(h_samples_size)), kmcudaMemoryCopyError); CUCH(cudaMemcpyToSymbol(d_clusters_size, &h_clusters_size, sizeof(h_clusters_size)), kmcudaMemoryCopyError); ); std::vector<uint32_t> shmem_sizes; 
RETERR(prepare_mem(h_samples_size, h_clusters_size, true, devs, verbosity, ccounts, assignments, assignments_prev, &shmem_sizes)); dim3 siblock(BS_YY_INI, 1, 1); dim3 sgblock(BS_YY_GFL, 1, 1); dim3 slblock(BS_YY_LFL, 1, 1); dim3 cblock(BS_LL_CNT, 1, 1); dim3 gblock(BLOCK_SIZE, 1, 1); auto plans = distribute(h_samples_size, h_features_size * sizeof(float), devs); auto planc = distribute(h_clusters_size, h_features_size * sizeof(float), devs); auto plang = distribute(h_yy_groups_size, h_features_size * sizeof(float), devs); if (verbosity > 1) { print_plan("plans", plans); print_plan("planc", planc); print_plan("plang", plang); } bool refresh = true; uint32_t h_passed_number = 0; for (; ; iter++) { if (!refresh) { int status = check_changed(iter, tolerance, h_samples_size, devs, verbosity); if (status < kmcudaSuccess) { return kmcudaSuccess; } if (status != kmcudaSuccess) { return static_cast<KMCUDAResult>(status); } FOR_EACH_DEV( uint32_t local_passed; CUCH(cudaMemcpyFromSymbol(&local_passed, d_passed_number, sizeof(h_passed_number)), kmcudaMemoryCopyError); h_passed_number += local_passed; ); DEBUG("passed number: %" PRIu32 "\n", h_passed_number); if (1.f - (h_passed_number + 0.f) / h_samples_size < YINYANG_REFRESH_EPSILON) { refresh = true; } h_passed_number = 0; } if (refresh) { INFO("refreshing Yinyang bounds...\n"); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plans[devi]; if (length == 0) { continue; } dim3 sigrid(upper(length, siblock.x), 1, 1); KERNEL_SWITCH(kmeans_yy_init, <<<sigrid, siblock, shmem_sizes[devi]>>>( offset, length, reinterpret_cast<const F*>(samples[devi].get()), reinterpret_cast<const F*>((*centroids)[devi].get()), (*assignments)[devi].get() + offset, (*assignments_yy)[devi].get(), (*bounds_yy)[devi].get())); ); refresh = false; } CUMEMCPY_D2D_ASYNC(*drifts_yy, 0, *centroids, 0, h_clusters_size * h_features_size); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = planc[devi]; if (length == 0) { continue; } dim3 cgrid(upper(length, cblock.x), 1, 1); KERNEL_SWITCH(kmeans_adjust, <<<cgrid, cblock, shmem_sizes[devi]>>>( offset, length, reinterpret_cast<const F*>(samples[devi].get()), (*assignments_prev)[devi].get(), (*assignments)[devi].get(), reinterpret_cast<F*>((*centroids)[devi].get()), (*ccounts)[devi].get())); ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = planc[devi]; if (length == 0) { continue; } FOR_OTHER_DEVS( CUP2P(ccounts, offset, length); CUP2P(centroids, offset * h_features_size, length * h_features_size); ); ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = planc[devi]; if (length == 0) { continue; } dim3 cgrid(upper(length, cblock.x), 1, 1); KERNEL_SWITCH(kmeans_yy_calc_drifts, <<<cgrid, cblock>>>( offset, length, reinterpret_cast<const F*>((*centroids)[devi].get()), reinterpret_cast<F*>((*drifts_yy)[devi].get()))); ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = planc[devi]; if (length == 0) { continue; } FOR_OTHER_DEVS( CUP2P(drifts_yy, h_clusters_size * h_features_size + offset, length); ); ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plang[devi]; if (length == 0) { continue; } dim3 ggrid(upper(length, gblock.x), 1, 1); kmeans_yy_find_group_max_drifts<<<ggrid, gblock, shmem_sizes[devi]>>>( offset, length, (*assignments_yy)[devi].get(), (*drifts_yy)[devi].get()); ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plang[devi]; if (length == 0) { continue; } FOR_OTHER_DEVS( CUP2P(drifts_yy, offset, length); 
); ); FOR_EACH_DEV( CUCH(cudaMemcpyToSymbolAsync(d_passed_number, &h_passed_number, sizeof(h_passed_number)), kmcudaMemoryCopyError); ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plans[devi]; if (length == 0) { continue; } dim3 sggrid(upper(length, sgblock.x), 1, 1); KERNEL_SWITCH(kmeans_yy_global_filter, <<<sggrid, sgblock>>>( offset, length, reinterpret_cast<const F*>(samples[devi].get()), reinterpret_cast<const F*>((*centroids)[devi].get()), (*assignments_yy)[devi].get(), (*drifts_yy)[devi].get(), (*assignments)[devi].get() + offset, (*assignments_prev)[devi].get() + offset, (*bounds_yy)[devi].get(), (*passed_yy)[devi].get())); dim3 slgrid(upper(length, slblock.x), 1, 1); KERNEL_SWITCH(kmeans_yy_local_filter, <<<slgrid, slblock, shmem_sizes[devi]>>>( offset, length, reinterpret_cast<const F*>(samples[devi].get()), (*passed_yy)[devi].get(), reinterpret_cast<const F*>((*centroids)[devi].get()), (*assignments_yy)[devi].get(), (*drifts_yy)[devi].get(), (*assignments)[devi].get() + offset, (*bounds_yy)[devi].get())); ); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plans[devi]; if (length == 0) { continue; } FOR_OTHER_DEVS( CUP2P(assignments_prev, offset, length); CUP2P(assignments, offset, length); ); ); } } KMCUDAResult kmeans_cuda_calc_average_distance( uint32_t h_samples_size, uint16_t h_features_size, KMCUDADistanceMetric metric, const std::vector<int> &devs, int fp16x2, int32_t verbosity, const udevptrs<float> &samples, const udevptrs<float> &centroids, const udevptrs<uint32_t> &assignments, float *average_distance) { INFO("calculating the average distance...\n"); auto plans = distribute(h_samples_size, h_features_size * sizeof(float), devs); udevptrs<atomic_float> dev_dists; CUMALLOC(dev_dists, sizeof(atomic_float)); CUMEMSET_ASYNC(dev_dists, 0, sizeof(atomic_float)); FOR_EACH_DEVI( uint32_t offset, length; std::tie(offset, length) = plans[devi]; if (length == 0) { continue; } dim3 block(BLOCK_SIZE, 1, 1); dim3 grid(upper(length, block.x), 1, 1); KERNEL_SWITCH(kmeans_calc_average_distance, <<<grid, block, block.x * sizeof(float)>>>( offset, length, reinterpret_cast<const F*>(samples[devi].get()), reinterpret_cast<const F*>(centroids[devi].get()), assignments[devi].get(), dev_dists[devi].get())); ); atomic_float sum = 0; FOR_EACH_DEVI( atomic_float hdist; CUCH(cudaMemcpy(&hdist, dev_dists[devi].get(), sizeof(atomic_float), cudaMemcpyDeviceToHost), kmcudaMemoryCopyError); sum += hdist; ); *average_distance = sum / h_samples_size; return kmcudaSuccess; } } // extern "C"
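The warpReduceSum / warpReduceMin helpers used by these kernels come from tricks.cuh, which is not included in this dump. As a reference point only, a conventional shuffle-based warp sum looks like the sketch below; the name is illustrative, it assumes CUDA 9+ for the *_sync shuffle intrinsics, and it is not claimed to be the library's actual implementation.

// Sketch only: each lane contributes one value; after the loop, lane 0 of the
// warp holds the sum of all 32 lanes, which matches the usage pattern
//   dist = warpReduceSum(dist);
//   if (threadIdx.x % 32 == 0) { atomicAdd(dists_sum, dist); }
__device__ __forceinline__ float warp_reduce_sum_sketch(float v) {
  #pragma unroll
  for (int offset = 16; offset > 0; offset >>= 1) {
    v += __shfl_down_sync(0xffffffffu, v, offset);
  }
  return v;
}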
1e7ea52f86b6f1393531973949c34c6ec231b691.hip
// !!! This is a file automatically generated by hipify!!! /** * gesummv.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <hip/hip_runtime.h> #include "../../../common/polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU_DEVICE 0 /* Problem size */ #define N 4096 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 256 #define DIM_THREAD_BLOCK_Y 1 /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 43532.0f #define BETA 12313.0f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void gesummv(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { int i, j; for (i = 0; i < N; i++) { tmp[i] = 0; y[i] = 0; for (j = 0; j < N; j++) { tmp[i] = A[i*N + j] * x[j] + tmp[i]; y[i] = B[i*N + j] * x[j] + y[i]; } y[i] = ALPHA * tmp[i] + BETA * y[i]; } } void init(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* A_gpu, DATA_TYPE* x_gpu) { int i, j; for (i = 0; i < N; i++) { x[i] = ((DATA_TYPE) i) / N; x_gpu[i] = ((DATA_TYPE) i) / N; for (j = 0; j < N; j++) { A[i*N + j] = ((DATA_TYPE) i*j) / N; A_gpu[i*N + j] = ((DATA_TYPE) i*j) / N; } } } void compareResults(DATA_TYPE* y, DATA_TYPE* y_outputFromGpu) { int i, fail; fail = 0; for (i=0; i<(N); i++) { if (percentDiff(y[i], y_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); hipSetDevice( GPU_DEVICE ); } __global__ void gesummv_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { int j; for(j = 0; j < N; j++) { tmp[i] += a[i * N + j] * x[j]; y[i] += b[i * N + j] * x[j]; } y[i] = ALPHA * tmp[i] + BETA * y[i]; } } void gesummvCuda(DATA_TYPE* A_gpu, DATA_TYPE* B_gpu, DATA_TYPE* x_gpu, DATA_TYPE* y_gpu, DATA_TYPE* tmp_gpu) { hipStream_t stream1; hipStream_t stream2; hipStream_t stream3; hipStream_t stream4; hipStream_t stream5; hipStreamCreate(&stream1); hipStreamCreate(&stream2); hipStreamCreate(&stream3); hipStreamCreate(&stream4); hipStreamCreate(&stream5); #ifdef PREF double t_start, t_end; dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), 1); hipMemPrefetchAsync(A_gpu,N*N*sizeof(DATA_TYPE), GPU_DEVICE, stream1 ); hipMemPrefetchAsync(B_gpu,N*N*sizeof(DATA_TYPE), GPU_DEVICE, stream2 ); hipMemPrefetchAsync(x_gpu,N*sizeof(DATA_TYPE), GPU_DEVICE, stream3 ); hipMemPrefetchAsync(y_gpu,N*sizeof(DATA_TYPE), GPU_DEVICE, stream4 ); hipMemPrefetchAsync(tmp_gpu,N*sizeof(DATA_TYPE), GPU_DEVICE, stream5 ); hipStreamSynchronize(stream1); hipStreamSynchronize(stream2); hipStreamSynchronize(stream3); hipStreamSynchronize(stream4); hipStreamSynchronize(stream5); t_start = rtclock(); for (int i = 0; i < 1; i++){ hipLaunchKernelGGL(( gesummv_kernel), dim3(grid), dim3(block), 0 , stream5, A_gpu,B_gpu,x_gpu, y_gpu, 
tmp_gpu); hipDeviceSynchronize(); } t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); #else double t_start, t_end; dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), 1); t_start = rtclock(); for (int i = 0; i < 1; i++){ hipLaunchKernelGGL(( gesummv_kernel), dim3(grid), dim3(block), 0, 0, A_gpu,B_gpu,x_gpu, y_gpu, tmp_gpu); hipDeviceSynchronize(); } t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); #endif } int main(int argc, char *argv[]) { double t_start, t_end; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* x; DATA_TYPE* y; DATA_TYPE* tmp; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *x_gpu; DATA_TYPE *y_gpu; DATA_TYPE *tmp_gpu; A = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE)); x = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); y = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); tmp = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); hipMallocManaged((void **)&A_gpu, sizeof(DATA_TYPE) * N * N); hipMallocManaged((void **)&B_gpu, sizeof(DATA_TYPE) * N * N); hipMallocManaged((void **)&x_gpu, sizeof(DATA_TYPE) * N); hipMallocManaged((void **)&y_gpu, sizeof(DATA_TYPE) * N); hipMallocManaged((void **)&tmp_gpu, sizeof(DATA_TYPE) * N); init(A, x, A_gpu, x_gpu); GPU_argv_init(); gesummvCuda(A_gpu, B_gpu, x_gpu, y_gpu, tmp_gpu); t_start = rtclock(); gesummv(A, B, x, y, tmp); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(y, y_gpu); free(A); free(B); free(x); free(y); free(tmp); hipFree(A_gpu); hipFree(B_gpu); hipFree(x_gpu); hipFree(y_gpu); hipFree(tmp_gpu); return 0; }
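Note that init() in this file seeds only A, x and their managed twins; B and B_gpu are read by the CPU and GPU paths but never written first, so the final comparison depends on whatever malloc and hipMallocManaged happen to return. A stricter initializer would look like the sketch below; it is not part of the benchmark, and the seed formula for B is purely illustrative.

/* Sketch, not PolyBench/GPU code: seed B and B_gpu alongside A and x so both
 * the CPU and GPU paths compute from defined data (reuses the file's
 * DATA_TYPE and N definitions). */
void init_all(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x,
              DATA_TYPE *A_gpu, DATA_TYPE *B_gpu, DATA_TYPE *x_gpu)
{
  for (int i = 0; i < N; i++) {
    x[i] = x_gpu[i] = ((DATA_TYPE) i) / N;
    for (int j = 0; j < N; j++) {
      A[i*N + j] = A_gpu[i*N + j] = ((DATA_TYPE) i*j) / N;
      B[i*N + j] = B_gpu[i*N + j] = ((DATA_TYPE) (i*j + 1)) / N; /* illustrative */
    }
  }
}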
1e7ea52f86b6f1393531973949c34c6ec231b691.cu
/** * gesummv.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <cuda.h> #include "../../../common/polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU_DEVICE 0 /* Problem size */ #define N 4096 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 256 #define DIM_THREAD_BLOCK_Y 1 /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 43532.0f #define BETA 12313.0f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void gesummv(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { int i, j; for (i = 0; i < N; i++) { tmp[i] = 0; y[i] = 0; for (j = 0; j < N; j++) { tmp[i] = A[i*N + j] * x[j] + tmp[i]; y[i] = B[i*N + j] * x[j] + y[i]; } y[i] = ALPHA * tmp[i] + BETA * y[i]; } } void init(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* A_gpu, DATA_TYPE* x_gpu) { int i, j; for (i = 0; i < N; i++) { x[i] = ((DATA_TYPE) i) / N; x_gpu[i] = ((DATA_TYPE) i) / N; for (j = 0; j < N; j++) { A[i*N + j] = ((DATA_TYPE) i*j) / N; A_gpu[i*N + j] = ((DATA_TYPE) i*j) / N; } } } void compareResults(DATA_TYPE* y, DATA_TYPE* y_outputFromGpu) { int i, fail; fail = 0; for (i=0; i<(N); i++) { if (percentDiff(y[i], y_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); cudaSetDevice( GPU_DEVICE ); } __global__ void gesummv_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { int j; for(j = 0; j < N; j++) { tmp[i] += a[i * N + j] * x[j]; y[i] += b[i * N + j] * x[j]; } y[i] = ALPHA * tmp[i] + BETA * y[i]; } } void gesummvCuda(DATA_TYPE* A_gpu, DATA_TYPE* B_gpu, DATA_TYPE* x_gpu, DATA_TYPE* y_gpu, DATA_TYPE* tmp_gpu) { cudaStream_t stream1; cudaStream_t stream2; cudaStream_t stream3; cudaStream_t stream4; cudaStream_t stream5; cudaStreamCreate(&stream1); cudaStreamCreate(&stream2); cudaStreamCreate(&stream3); cudaStreamCreate(&stream4); cudaStreamCreate(&stream5); #ifdef PREF double t_start, t_end; dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), 1); cudaMemPrefetchAsync(A_gpu,N*N*sizeof(DATA_TYPE), GPU_DEVICE, stream1 ); cudaMemPrefetchAsync(B_gpu,N*N*sizeof(DATA_TYPE), GPU_DEVICE, stream2 ); cudaMemPrefetchAsync(x_gpu,N*sizeof(DATA_TYPE), GPU_DEVICE, stream3 ); cudaMemPrefetchAsync(y_gpu,N*sizeof(DATA_TYPE), GPU_DEVICE, stream4 ); cudaMemPrefetchAsync(tmp_gpu,N*sizeof(DATA_TYPE), GPU_DEVICE, stream5 ); cudaStreamSynchronize(stream1); cudaStreamSynchronize(stream2); cudaStreamSynchronize(stream3); cudaStreamSynchronize(stream4); cudaStreamSynchronize(stream5); t_start = rtclock(); for (int i = 0; i < 1; i++){ gesummv_kernel<<< grid, block, 0 , stream5>>>(A_gpu,B_gpu,x_gpu, y_gpu, tmp_gpu); cudaDeviceSynchronize(); } t_end = rtclock(); fprintf(stdout, "GPU 
Runtime: %0.6lfs\n", t_end - t_start); #else double t_start, t_end; dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), 1); t_start = rtclock(); for (int i = 0; i < 1; i++){ gesummv_kernel<<< grid, block>>>(A_gpu,B_gpu,x_gpu, y_gpu, tmp_gpu); cudaDeviceSynchronize(); } t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); #endif } int main(int argc, char *argv[]) { double t_start, t_end; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* x; DATA_TYPE* y; DATA_TYPE* tmp; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *x_gpu; DATA_TYPE *y_gpu; DATA_TYPE *tmp_gpu; A = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE)); x = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); y = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); tmp = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); cudaMallocManaged((void **)&A_gpu, sizeof(DATA_TYPE) * N * N); cudaMallocManaged((void **)&B_gpu, sizeof(DATA_TYPE) * N * N); cudaMallocManaged((void **)&x_gpu, sizeof(DATA_TYPE) * N); cudaMallocManaged((void **)&y_gpu, sizeof(DATA_TYPE) * N); cudaMallocManaged((void **)&tmp_gpu, sizeof(DATA_TYPE) * N); init(A, x, A_gpu, x_gpu); GPU_argv_init(); gesummvCuda(A_gpu, B_gpu, x_gpu, y_gpu, tmp_gpu); t_start = rtclock(); gesummv(A, B, x, y, tmp); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(y, y_gpu); free(A); free(B); free(x); free(y); free(tmp); cudaFree(A_gpu); cudaFree(B_gpu); cudaFree(x_gpu); cudaFree(y_gpu); cudaFree(tmp_gpu); return 0; }
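One more observation on this pair: gesummv_kernel accumulates into tmp[i] and y[i] with +=, yet neither buffer is zeroed after cudaMallocManaged, and that allocator is not documented to return zero-filled memory. A defensive variant, sketched below and not part of the benchmark, keeps the accumulators in registers and writes each result exactly once.

// Sketch, not PolyBench/GPU code: reuses the file's N, ALPHA, BETA and
// DATA_TYPE definitions; launch geometry would be the same as gesummv_kernel.
__global__ void gesummv_kernel_zeroed(const DATA_TYPE *a, const DATA_TYPE *b,
                                      const DATA_TYPE *x,
                                      DATA_TYPE *y, DATA_TYPE *tmp)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < N) {
    DATA_TYPE t = 0, yi = 0;                 // start from a known value
    for (int j = 0; j < N; j++) {
      t  += a[i * N + j] * x[j];
      yi += b[i * N + j] * x[j];
    }
    tmp[i] = t;
    y[i]   = ALPHA * t + BETA * yi;
  }
}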
9c3db743327a0fda1819a88c052bf58a29777393.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "mj-draw.h" #include <jetson-utils/cudaFont.h> extern int blink_state; imu_info_t imu_data; tele_cam_info_t cam_data; stream_info_t stream_data; g_distance_info_t g_distance_data; osd_ctl_info_t osd_ctl_switch; std::string temp_str_c; int min_of_them(int a, int b){ return a < b ? a : b; } inline __host__ __device__ float4 alpha_blend( const float4& bg, const float4& fg ) { const float alpha = fg.w / 255.0f; const float ialph = 1.0f - alpha; return make_float4(alpha * fg.x + ialph * bg.x, alpha * fg.y + ialph * bg.y, alpha * fg.z + ialph * bg.z, bg.w); } __global__ void gpuBlendBOx( uchar3* input, uchar3* output, int imgWidth, int imgHeight) { float4 color_temp, color_temp1; color_temp.x = 0; color_temp.y = 0; color_temp.z = 200; color_temp.w = 0; color_temp1.x = 200; color_temp1.y = 0; color_temp1.z = 0; color_temp1.w = 125; const float px_glyph = 1; const float px_glyph1 = 1; int box_x = blockIdx.x * blockDim.x + threadIdx.x; int box_y = blockIdx.y * blockDim.y + threadIdx.y; int img_x = (float)imgWidth/4 + box_x; int img_y = (float)imgHeight/4 + box_y; if(img_x >= imgWidth || img_y >= imgHeight){ return; } if(box_x >= (imgWidth/2) || box_y >= (imgHeight/2)){ return; } const float4 px_font = make_float4(px_glyph * color_temp.x, px_glyph * color_temp.y, px_glyph * color_temp.z, px_glyph * color_temp.w); const float4 px_in = cast_vec<float4>(input[img_y * imgWidth + img_x]); const float4 px_font1 = make_float4(px_glyph1 * color_temp1.x, px_glyph1 * color_temp1.y, px_glyph1 * color_temp1.z, px_glyph1 * color_temp1.w); const float4 px_in1 = cast_vec<float4>(input[img_y * imgWidth + img_x]); if(box_x > imgWidth/8*3 && box_y > imgHeight/8*3){ output[img_y * imgWidth + img_x] = cast_vec<uchar3>(alpha_blend(px_in1, px_font1)); }else{ output[img_y * imgWidth + img_x] = cast_vec<uchar3>(alpha_blend(px_in, px_font)); } } /* description: draw a circle, the circle center is the img center */ __global__ void gpuDrawCircle(uchar3 * img, int width, int height, int radius, int thickness) { uchar3 pixel_temp; pixel_temp.x = 0; pixel_temp.y = 250; pixel_temp.z = 0; if(thickness > 8 || thickness < 0){ thickness = 8; } int box_x = blockIdx.x * blockDim.x + threadIdx.x; int box_y = blockIdx.y * blockDim.y + threadIdx.y; int img_x = ((float)width - 2*radius)/2 + box_x; int img_y = ((float)height - 2*radius)/2 + box_y; if(img_x >= width || img_y >= height){ return; } if(box_x >= (radius*2) || box_y >= (radius*2)){ return; } float result = (box_x - radius) * (box_x - radius); result = result + (box_y - radius) * (box_y - radius); // result = result - radius * radius; if( result - (float)radius*radius <= 0 && result - (float)radius*radius >= -200 ){ img[img_y * width + img_x] = pixel_temp; } } __global__ void gpuDrawSolidCircle_pos(uchar3 * img, int width, int height, int radius, int2 center_pos, uchar3 color) { uchar3 pixel_temp; pixel_temp = color; int box_x = blockIdx.x * blockDim.x + threadIdx.x; int box_y = blockIdx.y * blockDim.y + threadIdx.y; int img_x = center_pos.x - radius + box_x; int img_y = center_pos.y - radius + box_y; if(img_x >= width || img_y >= height){ return; } if(box_x >= (radius*2) || box_y >= (radius*2)){ return; } float result = (box_x - radius) * (box_x - radius); result = result + (box_y - radius) * (box_y - radius); if( result - (float)radius*radius <= 0){ img[img_y * width + img_x] = pixel_temp; } } /* description: draw a box ,box center is the image center para1: */ __global__ void 
gpuDrawBox(uchar3 * img, int width, int height, int box_width, int box_height, int thickness) { uchar3 pixel_temp; pixel_temp.x = 0; pixel_temp.y = 250; pixel_temp.z = 0; if(thickness > 8 || thickness < 0){ thickness = 8; } int box_x = blockIdx.x * blockDim.x + threadIdx.x; int box_y = blockIdx.y * blockDim.y + threadIdx.y; int img_x = (width - box_width)/2 + box_x; int img_y = (height - box_height)/2 + box_y; if((img_x + thickness/2) >= width || (img_y + thickness/2) >= height){ return; } if(box_x >= box_width || box_y >= box_height){ return; } int i = 0; if(box_x == 0 || box_x == (box_width - 1)) { for(i = 0;i < thickness;++i){ img[img_y * width + (img_x - thickness/2 + i)] = pixel_temp; } } if(box_y == 0 || box_y == (box_height - 1)) { for(i = 0;i < thickness;++i){ img[(img_y - thickness/2 + i) * width + img_x] = pixel_temp; } } } /* * draw a cross, input para "int2 pos " is the origin pos of box, not the center pos of box */ __global__ void gpuDrawCross_pos (uchar3 * img, int width, int height, int crossLen, int thickness, int2 pos) { uchar3 pixel_temp; pixel_temp.x = 0; pixel_temp.y = 250; pixel_temp.z = 0; if(thickness > 8 || thickness < 0){ thickness = 8; } int box_x = blockIdx.x * blockDim.x + threadIdx.x; int box_y = blockIdx.y * blockDim.y + threadIdx.y; int img_x = pos.x + box_x; int img_y = pos.y + box_y; if(img_x >= width || img_y >= height){ return; } if(box_x >= crossLen || box_y >= crossLen){ return; } uint i = 0; if(box_y == (crossLen / 2)){ for(i = 0;i < thickness;++i){ img[ (img_y - thickness/2 + i) * width + img_x] = pixel_temp; } } if(box_x == (crossLen / 2)){ for(i = 0;i < thickness;++i){ img[ img_y * width + (img_x - thickness/2 + i) ] = pixel_temp; } } } /* description: draw a cross, cross center is the img center para1: */ __global__ void gpuDrawCross_mj (uchar3 * img, int width, int height, int crossLen, int thickness) { uchar3 pixel_temp; pixel_temp.x = 0; pixel_temp.y = 250; pixel_temp.z = 0; if(thickness > 8 || thickness < 0){ thickness = 8; } int box_x = blockIdx.x * blockDim.x + threadIdx.x; int box_y = blockIdx.y * blockDim.y + threadIdx.y; int img_x = (width - crossLen) / 2 + box_x; int img_y = (height - crossLen) / 2 + box_y; if(img_x >= width || img_y >= height){ return; } if(box_x >= crossLen || box_y >= crossLen){ return; } uint i = 0; if(box_y == (crossLen / 2)){ for(i = 0;i < thickness;++i){ img[ (img_y - thickness/2 + i) * width + img_x] = pixel_temp; } } if(box_x == (crossLen / 2)){ for(i = 0;i < thickness;++i){ img[ img_y * width + (img_x - thickness/2 + i)] = pixel_temp; } } } /************ * * application * function * *************/ void app_draw_cross_on_img(uchar3* img, int width, int height, int cross_len, int thickness) { if(cross_len > width || cross_len > height){ cross_len = min_of_them(width, height); } dim3 blockDim(8,8); dim3 gridDim(iDivUp(cross_len, blockDim.x), iDivUp(cross_len, blockDim.y)); hipLaunchKernelGGL(( gpuDrawCross_mj), dim3(gridDim), dim3(blockDim), 0, 0, img, width, height, cross_len, thickness); } void app_draw_cross_on_img_pos(uchar3* img, int width, int height, int cross_len, int thickness, int2 pos) { if(cross_len > width || cross_len > height){ cross_len = min_of_them(width, height); } dim3 blockDim(8,8); dim3 gridDim(iDivUp(cross_len, blockDim.x), iDivUp(cross_len, blockDim.y)); hipLaunchKernelGGL(( gpuDrawCross_pos), dim3(gridDim), dim3(blockDim), 0, 0, img, width, height, cross_len, thickness, pos); } void app_draw_Box_on_img(uchar3* img, int width, int height, int box_w, int box_h, int thickness) { if(box_w > 
width || box_h > height){ return ; } dim3 blockDim(8,8); dim3 gridDim(iDivUp(box_w, blockDim.x), iDivUp(box_h, blockDim.y)); hipLaunchKernelGGL(( gpuDrawBox), dim3(gridDim), dim3(blockDim), 0, 0, img, width, height, box_w, box_h, thickness); } void app_draw_circle_on_img(uchar3 * img, int width, int height, int radius, int thickness) { if((radius * 2) > width || (radius * 2) > height){ return ; } dim3 blockDim(8,8); dim3 gridDim(iDivUp(radius*2, blockDim.x), iDivUp(radius*2, blockDim.y)); hipLaunchKernelGGL(( gpuDrawCircle), dim3(gridDim), dim3(blockDim), 0, 0, img, width, height, radius, thickness); } void app_blend_on_img(uchar3 * img, int width, int height, int thickness) { dim3 blockDim(8,8); dim3 gridDim(iDivUp(width/2, blockDim.x), iDivUp(height/2, blockDim.y)); hipLaunchKernelGGL(( gpuBlendBOx), dim3(gridDim), dim3(blockDim), 0, 0, img, img, width, height); } void app_draw_solidCircle_on_img(uchar3 * img, int width, int height, int radius, int2 center_pos) { uchar3 color_pixel; color_pixel.x = 255; color_pixel.y = 0; color_pixel.z = 0; dim3 blockDim(8,8); dim3 gridDim(iDivUp(radius*2, blockDim.x), iDivUp(radius*2, blockDim.y)); hipLaunchKernelGGL(( gpuDrawSolidCircle_pos), dim3(gridDim), dim3(blockDim), 0, 0, img, width, height, radius, center_pos, color_pixel); } int app_text_overlay(cudaFont* font, uchar3 * image, int width, int height) { temp_str_c = "H.265"; /* imu_data.year = 2020; imu_data.month = 3; imu_data.date = 6; imu_data.hour = 13; imu_data.minute = 54; imu_data.second = 45; imu_data.yaw = 359.123; imu_data.roll = 11.123; imu_data.pitch = 22.123; imu_data.longitude = 125.3; imu_data.latitude = 34.7; imu_data.height = 14000; cam_data.zoom = 4; cam_data.memory_left = 1024; cam_data.pics_amount = 8; cam_data.pics_num_already_sync = 6; */ stream_data.width = 1920; stream_data.height = 1080; stream_data.frame_rate = 30; stream_data.code_type = temp_str_c; stream_data.bps = 1; char str_temp[256]; char str_temp1[50]; //imu_info // if(osd_ctl_switch.imu_data_osd_switch == 1){ // sprintf(str_temp, "%d-%d-%d %d:%d:%d", imu_data.year, imu_data.month, imu_data.date, imu_data.hour, imu_data.minute, imu_data.second); // font->OverlayText_edge_alig(image, width, height, // str_temp, 5, 5, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),3); // sprintf(str_temp, "yaw: %.3f", imu_data.yaw); // font->OverlayText_edge_alig(image, width, height, // str_temp, width, 5, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); // sprintf(str_temp, "pitch: %.3f", imu_data.pitch); // font->OverlayText_edge_alig(image, width, height, // str_temp, width, 45, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); // sprintf(str_temp, "roll: %.3f", imu_data.roll); // font->OverlayText_edge_alig(image, width, height, // str_temp, width, 85, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); // sprintf(str_temp, "log: %.3f", imu_data.longitude); // font->OverlayText_edge_alig(image, width, height, // str_temp, width, 125, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); // sprintf(str_temp, "lat: %.3f", imu_data.latitude); // font->OverlayText_edge_alig(image, width, height, // str_temp, width, 165, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); // sprintf(str_temp, "hgt: %.3f", imu_data.height); // font->OverlayText_edge_alig(image, width, height, // str_temp, width, 205, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); // } //cam info if(osd_ctl_switch.cam_info_osd_switch == 1){ sprintf(str_temp, "cam zoom: %d", cam_data.zoom); 
font->OverlayText_edge_alig(image, width, height, str_temp, 5, 45, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); sprintf(str_temp, "memory left: %d", cam_data.memory_left); font->OverlayText_edge_alig(image, width, height, str_temp, 5, 85, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); sprintf(str_temp, "cam pics num: %d", cam_data.pics_captured_amount); font->OverlayText_edge_alig(image, width, height, str_temp, 5, 125, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); sprintf(str_temp, "cam pics sync: %d", cam_data.pics_num_already_sync); font->OverlayText_edge_alig(image, width, height, str_temp, 5, 165, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); } //stream show if(osd_ctl_switch.stream_info_osd_switch == 1){ for(int i = 0;i < stream_data.code_type.length();i++){ if(stream_data.code_type.length() > 49) return 0; str_temp1[i] = stream_data.code_type[i]; } sprintf(str_temp, "%dx%d@%dfps/%s/%dMbps", stream_data.width, stream_data.height, stream_data.frame_rate, str_temp1, stream_data.bps); font->OverlayText_edge_alig(image, width, height, str_temp, width, height -30 , make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 50),0); float4 temp_rect_pos = font->first_string_pos; if(blink_state == 1){ app_draw_solidCircle_on_img(image, width, height, 10, make_int2(temp_rect_pos.x - 15 -3 ,(temp_rect_pos.y + temp_rect_pos.w)/2) ); } } return 0; } void init_ros_message_data() { osd_ctl_switch.cam_info_osd_switch = 1; osd_ctl_switch.imu_data_osd_switch = 1; osd_ctl_switch.stream_info_osd_switch = 1; osd_ctl_switch.cross_display_osd_switch = 1; osd_ctl_switch.telephoto_cam_view_box_osd_switch = 1; }
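The HIP file above and the CUDA file that follows are the same source modulo launch syntax: hipify rewrites each triple-chevron launch into a hipLaunchKernelGGL call with explicit shared-memory and stream arguments. A minimal, self-contained sketch of that mapping follows; the fill kernel and launch_fill wrapper are stand-ins for illustration, not part of mj-draw.

#include <hip/hip_runtime.h>

// Stand-in kernel, illustrative only.
__global__ void fill(uchar3* img, int n, uchar3 v) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) img[i] = v;
}

void launch_fill(uchar3* img, int n, uchar3 v) {
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA form (as in the .cu file below):  fill<<<grid, block>>>(img, n, v);
    // HIP form emitted by hipify: kernel, grid, block, dynamic shared bytes, stream, kernel args...
    hipLaunchKernelGGL(fill, grid, block, 0, 0, img, n, v);
}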
9c3db743327a0fda1819a88c052bf58a29777393.cu
#include "mj-draw.h" #include <jetson-utils/cudaFont.h> extern int blink_state; imu_info_t imu_data; tele_cam_info_t cam_data; stream_info_t stream_data; g_distance_info_t g_distance_data; osd_ctl_info_t osd_ctl_switch; std::string temp_str_c; int min_of_them(int a, int b){ return a < b ? a : b; } inline __host__ __device__ float4 alpha_blend( const float4& bg, const float4& fg ) { const float alpha = fg.w / 255.0f; const float ialph = 1.0f - alpha; return make_float4(alpha * fg.x + ialph * bg.x, alpha * fg.y + ialph * bg.y, alpha * fg.z + ialph * bg.z, bg.w); } __global__ void gpuBlendBOx( uchar3* input, uchar3* output, int imgWidth, int imgHeight) { float4 color_temp, color_temp1; color_temp.x = 0; color_temp.y = 0; color_temp.z = 200; color_temp.w = 0; color_temp1.x = 200; color_temp1.y = 0; color_temp1.z = 0; color_temp1.w = 125; const float px_glyph = 1; const float px_glyph1 = 1; int box_x = blockIdx.x * blockDim.x + threadIdx.x; int box_y = blockIdx.y * blockDim.y + threadIdx.y; int img_x = (float)imgWidth/4 + box_x; int img_y = (float)imgHeight/4 + box_y; if(img_x >= imgWidth || img_y >= imgHeight){ return; } if(box_x >= (imgWidth/2) || box_y >= (imgHeight/2)){ return; } const float4 px_font = make_float4(px_glyph * color_temp.x, px_glyph * color_temp.y, px_glyph * color_temp.z, px_glyph * color_temp.w); const float4 px_in = cast_vec<float4>(input[img_y * imgWidth + img_x]); const float4 px_font1 = make_float4(px_glyph1 * color_temp1.x, px_glyph1 * color_temp1.y, px_glyph1 * color_temp1.z, px_glyph1 * color_temp1.w); const float4 px_in1 = cast_vec<float4>(input[img_y * imgWidth + img_x]); if(box_x > imgWidth/8*3 && box_y > imgHeight/8*3){ output[img_y * imgWidth + img_x] = cast_vec<uchar3>(alpha_blend(px_in1, px_font1)); }else{ output[img_y * imgWidth + img_x] = cast_vec<uchar3>(alpha_blend(px_in, px_font)); } } /* description: draw a circle, the circle center is the img center */ __global__ void gpuDrawCircle(uchar3 * img, int width, int height, int radius, int thickness) { uchar3 pixel_temp; pixel_temp.x = 0; pixel_temp.y = 250; pixel_temp.z = 0; if(thickness > 8 || thickness < 0){ thickness = 8; } int box_x = blockIdx.x * blockDim.x + threadIdx.x; int box_y = blockIdx.y * blockDim.y + threadIdx.y; int img_x = ((float)width - 2*radius)/2 + box_x; int img_y = ((float)height - 2*radius)/2 + box_y; if(img_x >= width || img_y >= height){ return; } if(box_x >= (radius*2) || box_y >= (radius*2)){ return; } float result = (box_x - radius) * (box_x - radius); result = result + (box_y - radius) * (box_y - radius); // result = result - radius * radius; if( result - (float)radius*radius <= 0 && result - (float)radius*radius >= -200 ){ img[img_y * width + img_x] = pixel_temp; } } __global__ void gpuDrawSolidCircle_pos(uchar3 * img, int width, int height, int radius, int2 center_pos, uchar3 color) { uchar3 pixel_temp; pixel_temp = color; int box_x = blockIdx.x * blockDim.x + threadIdx.x; int box_y = blockIdx.y * blockDim.y + threadIdx.y; int img_x = center_pos.x - radius + box_x; int img_y = center_pos.y - radius + box_y; if(img_x >= width || img_y >= height){ return; } if(box_x >= (radius*2) || box_y >= (radius*2)){ return; } float result = (box_x - radius) * (box_x - radius); result = result + (box_y - radius) * (box_y - radius); if( result - (float)radius*radius <= 0){ img[img_y * width + img_x] = pixel_temp; } } /* description: draw a box ,box center is the image center para1: */ __global__ void gpuDrawBox(uchar3 * img, int width, int height, int box_width, int box_height, int thickness) 
{ uchar3 pixel_temp; pixel_temp.x = 0; pixel_temp.y = 250; pixel_temp.z = 0; if(thickness > 8 || thickness < 0){ thickness = 8; } int box_x = blockIdx.x * blockDim.x + threadIdx.x; int box_y = blockIdx.y * blockDim.y + threadIdx.y; int img_x = (width - box_width)/2 + box_x; int img_y = (height - box_height)/2 + box_y; if((img_x + thickness/2) >= width || (img_y + thickness/2) >= height){ return; } if(box_x >= box_width || box_y >= box_height){ return; } int i = 0; if(box_x == 0 || box_x == (box_width - 1)) { for(i = 0;i < thickness;++i){ img[img_y * width + (img_x - thickness/2 + i)] = pixel_temp; } } if(box_y == 0 || box_y == (box_height - 1)) { for(i = 0;i < thickness;++i){ img[(img_y - thickness/2 + i) * width + img_x] = pixel_temp; } } } /* * draw a cross, input para "int2 pos " is the origin pos of box, not the center pos of box */ __global__ void gpuDrawCross_pos (uchar3 * img, int width, int height, int crossLen, int thickness, int2 pos) { uchar3 pixel_temp; pixel_temp.x = 0; pixel_temp.y = 250; pixel_temp.z = 0; if(thickness > 8 || thickness < 0){ thickness = 8; } int box_x = blockIdx.x * blockDim.x + threadIdx.x; int box_y = blockIdx.y * blockDim.y + threadIdx.y; int img_x = pos.x + box_x; int img_y = pos.y + box_y; if(img_x >= width || img_y >= height){ return; } if(box_x >= crossLen || box_y >= crossLen){ return; } uint i = 0; if(box_y == (crossLen / 2)){ for(i = 0;i < thickness;++i){ img[ (img_y - thickness/2 + i) * width + img_x] = pixel_temp; } } if(box_x == (crossLen / 2)){ for(i = 0;i < thickness;++i){ img[ img_y * width + (img_x - thickness/2 + i) ] = pixel_temp; } } } /* description: draw a cross, cross center is the img center para1: */ __global__ void gpuDrawCross_mj (uchar3 * img, int width, int height, int crossLen, int thickness) { uchar3 pixel_temp; pixel_temp.x = 0; pixel_temp.y = 250; pixel_temp.z = 0; if(thickness > 8 || thickness < 0){ thickness = 8; } int box_x = blockIdx.x * blockDim.x + threadIdx.x; int box_y = blockIdx.y * blockDim.y + threadIdx.y; int img_x = (width - crossLen) / 2 + box_x; int img_y = (height - crossLen) / 2 + box_y; if(img_x >= width || img_y >= height){ return; } if(box_x >= crossLen || box_y >= crossLen){ return; } uint i = 0; if(box_y == (crossLen / 2)){ for(i = 0;i < thickness;++i){ img[ (img_y - thickness/2 + i) * width + img_x] = pixel_temp; } } if(box_x == (crossLen / 2)){ for(i = 0;i < thickness;++i){ img[ img_y * width + (img_x - thickness/2 + i)] = pixel_temp; } } } /************ * * application * function * *************/ void app_draw_cross_on_img(uchar3* img, int width, int height, int cross_len, int thickness) { if(cross_len > width || cross_len > height){ cross_len = min_of_them(width, height); } dim3 blockDim(8,8); dim3 gridDim(iDivUp(cross_len, blockDim.x), iDivUp(cross_len, blockDim.y)); gpuDrawCross_mj<<<gridDim, blockDim>>>( img, width, height, cross_len, thickness); } void app_draw_cross_on_img_pos(uchar3* img, int width, int height, int cross_len, int thickness, int2 pos) { if(cross_len > width || cross_len > height){ cross_len = min_of_them(width, height); } dim3 blockDim(8,8); dim3 gridDim(iDivUp(cross_len, blockDim.x), iDivUp(cross_len, blockDim.y)); gpuDrawCross_pos<<<gridDim, blockDim>>>( img, width, height, cross_len, thickness, pos); } void app_draw_Box_on_img(uchar3* img, int width, int height, int box_w, int box_h, int thickness) { if(box_w > width || box_h > height){ return ; } dim3 blockDim(8,8); dim3 gridDim(iDivUp(box_w, blockDim.x), iDivUp(box_h, blockDim.y)); gpuDrawBox<<<gridDim, blockDim>>>( img, 
width, height, box_w, box_h, thickness); } void app_draw_circle_on_img(uchar3 * img, int width, int height, int radius, int thickness) { if((radius * 2) > width || (radius * 2) > height){ return ; } dim3 blockDim(8,8); dim3 gridDim(iDivUp(radius*2, blockDim.x), iDivUp(radius*2, blockDim.y)); gpuDrawCircle<<<gridDim, blockDim>>>(img, width, height, radius, thickness); } void app_blend_on_img(uchar3 * img, int width, int height, int thickness) { dim3 blockDim(8,8); dim3 gridDim(iDivUp(width/2, blockDim.x), iDivUp(height/2, blockDim.y)); gpuBlendBOx<<<gridDim, blockDim>>>(img, img, width, height); } void app_draw_solidCircle_on_img(uchar3 * img, int width, int height, int radius, int2 center_pos) { uchar3 color_pixel; color_pixel.x = 255; color_pixel.y = 0; color_pixel.z = 0; dim3 blockDim(8,8); dim3 gridDim(iDivUp(radius*2, blockDim.x), iDivUp(radius*2, blockDim.y)); gpuDrawSolidCircle_pos<<<gridDim, blockDim>>>(img, width, height, radius, center_pos, color_pixel); } int app_text_overlay(cudaFont* font, uchar3 * image, int width, int height) { temp_str_c = "H.265"; /* imu_data.year = 2020; imu_data.month = 3; imu_data.date = 6; imu_data.hour = 13; imu_data.minute = 54; imu_data.second = 45; imu_data.yaw = 359.123; imu_data.roll = 11.123; imu_data.pitch = 22.123; imu_data.longitude = 125.3; imu_data.latitude = 34.7; imu_data.height = 14000; cam_data.zoom = 4; cam_data.memory_left = 1024; cam_data.pics_amount = 8; cam_data.pics_num_already_sync = 6; */ stream_data.width = 1920; stream_data.height = 1080; stream_data.frame_rate = 30; stream_data.code_type = temp_str_c; stream_data.bps = 1; char str_temp[256]; char str_temp1[50]; //imu_info // if(osd_ctl_switch.imu_data_osd_switch == 1){ // sprintf(str_temp, "%d-%d-%d %d:%d:%d", imu_data.year, imu_data.month, imu_data.date, imu_data.hour, imu_data.minute, imu_data.second); // font->OverlayText_edge_alig(image, width, height, // str_temp, 5, 5, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),3); // sprintf(str_temp, "yaw: %.3f", imu_data.yaw); // font->OverlayText_edge_alig(image, width, height, // str_temp, width, 5, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); // sprintf(str_temp, "pitch: %.3f", imu_data.pitch); // font->OverlayText_edge_alig(image, width, height, // str_temp, width, 45, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); // sprintf(str_temp, "roll: %.3f", imu_data.roll); // font->OverlayText_edge_alig(image, width, height, // str_temp, width, 85, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); // sprintf(str_temp, "log: %.3f", imu_data.longitude); // font->OverlayText_edge_alig(image, width, height, // str_temp, width, 125, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); // sprintf(str_temp, "lat: %.3f", imu_data.latitude); // font->OverlayText_edge_alig(image, width, height, // str_temp, width, 165, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); // sprintf(str_temp, "hgt: %.3f", imu_data.height); // font->OverlayText_edge_alig(image, width, height, // str_temp, width, 205, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); // } //cam info if(osd_ctl_switch.cam_info_osd_switch == 1){ sprintf(str_temp, "cam zoom: %d", cam_data.zoom); font->OverlayText_edge_alig(image, width, height, str_temp, 5, 45, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); sprintf(str_temp, "memory left: %d", cam_data.memory_left); font->OverlayText_edge_alig(image, width, height, str_temp, 5, 85, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); sprintf(str_temp, "cam pics 
num: %d", cam_data.pics_captured_amount); font->OverlayText_edge_alig(image, width, height, str_temp, 5, 125, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); sprintf(str_temp, "cam pics sync: %d", cam_data.pics_num_already_sync); font->OverlayText_edge_alig(image, width, height, str_temp, 5, 165, make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 10),0); } //stream show if(osd_ctl_switch.stream_info_osd_switch == 1){ for(int i = 0;i < stream_data.code_type.length();i++){ if(stream_data.code_type.length() > 49) return 0; str_temp1[i] = stream_data.code_type[i]; } sprintf(str_temp, "%dx%d@%dfps/%s/%dMbps", stream_data.width, stream_data.height, stream_data.frame_rate, str_temp1, stream_data.bps); font->OverlayText_edge_alig(image, width, height, str_temp, width, height -30 , make_float4(0, 255, 0, 255), make_float4(0, 0, 0, 50),0); float4 temp_rect_pos = font->first_string_pos; if(blink_state == 1){ app_draw_solidCircle_on_img(image, width, height, 10, make_int2(temp_rect_pos.x - 15 -3 ,(temp_rect_pos.y + temp_rect_pos.w)/2) ); } } return 0; } void init_ros_message_data() { osd_ctl_switch.cam_info_osd_switch = 1; osd_ctl_switch.imu_data_osd_switch = 1; osd_ctl_switch.stream_info_osd_switch = 1; osd_ctl_switch.cross_display_osd_switch = 1; osd_ctl_switch.telephoto_cam_view_box_osd_switch = 1; }
3da3e812486f190ce7b44aab04a710c07b81d4a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include "kernel.hip" #include <time.h> #include "cuda_mpz.h" int main (int argc, char *argv[]) { ///////input control if (argc < 2){ exit(EXIT_FAILURE); } long x = strtol(argv[2], NULL, 10); long long unsigned pairs = x; unsigned thread_num = 2; long long unsigned data_num = pairs * 2; ///////host memory long long int *clockTable_h; clockTable_h = (long long int*) malloc( pairs * sizeof(long long int)); cuda_mpz_t h_n; cuda_mpz_t h_n_; cuda_mpz_t h_r2; ///////get n char n_input[] = "00000038f6e8cfba55dd0e47"; cuda_mpz_set_str_host(&h_n, n_input); ///////get n_ char n__input[] = "0000002e8457440e0d93c489"; cuda_mpz_set_str_host(&h_n_, n__input); ///////get r2 char r2_input[] = "0000003709d17d8f8686609f"; cuda_mpz_set_str_host(&h_r2, r2_input); ///////get Messages long long unsigned mesSize = sizeof(cuda_mpz_t) * data_num; cuda_mpz_t *myMes1_h; myMes1_h = (cuda_mpz_t*) malloc (mesSize); ///////get Message pairs char* line = NULL; size_t len = 0; FILE* fp2 = fopen(argv[1], "r");//input from pair storage if (fp2 == NULL){ exit(EXIT_FAILURE); } long long unsigned line_num = 0; while ((getline(&line, &len, fp2)) != -1) { line[strcspn(line, "\n")] = 0; cuda_mpz_set_str_host(&myMes1_h[line_num], line); line_num++; if(line_num == data_num){ break; } } fclose(fp2); if (line) free(line); cuda_mpz_t *myMes1_d; hipMalloc((cuda_mpz_t **) &myMes1_d, mesSize); hipMemcpy(myMes1_d, myMes1_h, mesSize, hipMemcpyHostToDevice); ///////get d char d_input[] = "1011011001001001010011110110010101010111001010110101111000111100001"; int d_bitsLength = (int)strlen(d_input); int* dBits = (int *) malloc(sizeof(int) * d_bitsLength); int* dBits_d; hipMalloc((void **) &dBits_d, sizeof(int) * d_bitsLength); int d_iterator = 0; while ( d_iterator < d_bitsLength){ if( d_input[d_iterator] == '1'){//big endian dBits[d_iterator] = 1; } else{ dBits[d_iterator] = 0; } d_iterator++; } hipMemcpy(dBits_d, dBits, sizeof(int) * d_bitsLength, hipMemcpyHostToDevice); ///////device memory long long int *clockTable_d; hipMalloc((void **) &clockTable_d, 1 * sizeof(long long int)); hipLaunchKernelGGL(( MontSQMLadder), dim3(1), dim3(thread_num), 0, 0, myMes1_d, pairs, h_r2, h_n, h_n_, dBits_d, d_bitsLength, clockTable_d);/////////////////////////////////////////kernel hipDeviceSynchronize(); hipMemcpy(clockTable_h, clockTable_d, 1 * sizeof(long long int), hipMemcpyDeviceToHost); long long int sum1 = clockTable_h[0]; sum1 = sum1 / pairs; printf("%lld", sum1); ////////free device hipFree(clockTable_d); hipFree(dBits_d); hipFree(myMes1_d); ////////free host free(clockTable_h); free(myMes1_h); free(dBits); return 0; }
3da3e812486f190ce7b44aab04a710c07b81d4a4.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include "kernel.cu" #include <time.h> #include "cuda_mpz.h" int main (int argc, char *argv[]) { ///////input control if (argc < 2){ exit(EXIT_FAILURE); } long x = strtol(argv[2], NULL, 10); long long unsigned pairs = x; unsigned thread_num = 2; long long unsigned data_num = pairs * 2; ///////host memory long long int *clockTable_h; clockTable_h = (long long int*) malloc( pairs * sizeof(long long int)); cuda_mpz_t h_n; cuda_mpz_t h_n_; cuda_mpz_t h_r2; ///////get n char n_input[] = "00000038f6e8cfba55dd0e47"; cuda_mpz_set_str_host(&h_n, n_input); ///////get n_ char n__input[] = "0000002e8457440e0d93c489"; cuda_mpz_set_str_host(&h_n_, n__input); ///////get r2 char r2_input[] = "0000003709d17d8f8686609f"; cuda_mpz_set_str_host(&h_r2, r2_input); ///////get Messages long long unsigned mesSize = sizeof(cuda_mpz_t) * data_num; cuda_mpz_t *myMes1_h; myMes1_h = (cuda_mpz_t*) malloc (mesSize); ///////get Message pairs char* line = NULL; size_t len = 0; FILE* fp2 = fopen(argv[1], "r");//input from pair storage if (fp2 == NULL){ exit(EXIT_FAILURE); } long long unsigned line_num = 0; while ((getline(&line, &len, fp2)) != -1) { line[strcspn(line, "\n")] = 0; cuda_mpz_set_str_host(&myMes1_h[line_num], line); line_num++; if(line_num == data_num){ break; } } fclose(fp2); if (line) free(line); cuda_mpz_t *myMes1_d; cudaMalloc((cuda_mpz_t **) &myMes1_d, mesSize); cudaMemcpy(myMes1_d, myMes1_h, mesSize, cudaMemcpyHostToDevice); ///////get d char d_input[] = "1011011001001001010011110110010101010111001010110101111000111100001"; int d_bitsLength = (int)strlen(d_input); int* dBits = (int *) malloc(sizeof(int) * d_bitsLength); int* dBits_d; cudaMalloc((void **) &dBits_d, sizeof(int) * d_bitsLength); int d_iterator = 0; while ( d_iterator < d_bitsLength){ if( d_input[d_iterator] == '1'){//big endian dBits[d_iterator] = 1; } else{ dBits[d_iterator] = 0; } d_iterator++; } cudaMemcpy(dBits_d, dBits, sizeof(int) * d_bitsLength, cudaMemcpyHostToDevice); ///////device memory long long int *clockTable_d; cudaMalloc((void **) &clockTable_d, 1 * sizeof(long long int)); MontSQMLadder<<<1, thread_num>>>(myMes1_d, pairs, h_r2, h_n, h_n_, dBits_d, d_bitsLength, clockTable_d);/////////////////////////////////////////kernel cudaDeviceSynchronize(); cudaMemcpy(clockTable_h, clockTable_d, 1 * sizeof(long long int), cudaMemcpyDeviceToHost); long long int sum1 = clockTable_h[0]; sum1 = sum1 / pairs; printf("%lld", sum1); ////////free device cudaFree(clockTable_d); cudaFree(dBits_d); cudaFree(myMes1_d); ////////free host free(clockTable_h); free(myMes1_h); free(dBits); return 0; }
455410bb50cf8ad5a92d4eca614c88309ddf9628.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <helper_cuda.h>
using namespace std;

// accessible from every kernel
__device__ int g_uids = 0;

__device__ void print_info(int depth, int thread, int uid, int parent_uid){
    // only thread 0 of the block prints the dynamic-parallelism (DP) launch info
    if(threadIdx.x == 0){
        if(depth == 0)
            printf("BLOCK %d launched by the host\n", uid);
        else{
            char buffer[32];
            for(int i=0; i<depth; i++){
                buffer[3*i+0] = '|';
                buffer[3*i+1] = ' ';
                buffer[3*i+2] = ' ';
            }
            buffer[3*depth] = '\0';
            printf("%s Depth[%d] BLOCK %d launched by block,thread:[%d,%d]\n",
                   buffer, depth, uid, parent_uid, thread);
        }
    }
    __syncthreads();
}

__global__ void cdp_kernel(int max_depth, int depth, int thread, int parent_uid){
    // shared by all threads of the block, but only tid==0 may set it; s_uid is the block id
    printf("hello world\n");
    __shared__ int s_uid;
    if(threadIdx.x == 0){
        s_uid = atomicAdd(&g_uids, 1);
    }
    __syncthreads();

    print_info(depth, thread, s_uid, parent_uid);

    if(++depth >= max_depth) return;
    hipLaunchKernelGGL((cdp_kernel), dim3(gridDim), dim3(blockDim), 0, 0, max_depth, depth, threadIdx.x, s_uid);
}

int main(int argc, char const *argv[]){
    cout<<"starting CUDA Dynamic Parallelism"<<endl;
    int max_depth = 2;
    if(checkCmdLineFlag(argc, argv, "help") || checkCmdLineFlag(argc, argv, "h")){
        cout<<"Usage: "<<argv[0]
            <<" depth=<max_depth>\t(where max_depth is a value between 1 and 8)."<<endl;
        exit(EXIT_SUCCESS);
    }
    if(checkCmdLineFlag(argc, argv, "depth")){
        max_depth = getCmdLineArgumentInt(argc, argv, "depth");
        if (max_depth<1 || max_depth>8){
            cout<<"depth parameter has to be between 1 and 8"<<endl;
            exit(EXIT_FAILURE);
        }
    }

    //================================
    // find/set the device
    int device_count = 0, device = -1;
    checkCudaErrors(hipGetDeviceCount(&device_count));
    for(int i=0; i<device_count; i++){
        hipDeviceProp_t properties;
        checkCudaErrors(hipGetDeviceProperties(&properties, i));
        if(properties.major>3 || (properties.major == 3 && properties.minor >= 5)){
            device = i;
            cout<<"Running on GPU:"<<device<<" ("<<properties.name<<") "<<endl;
            break;
        }else{
            cout<<"ERROR: dynamic parallelism requires GPU devices with compute SM 3.5 or higher"<<endl;
            cout<<"Current GPU device["<<i<<"] has compute SM"<<properties.major<<"."<<properties.minor
                <<endl<<"Exiting..."<<endl;
            continue;
        }
    }
    if(device == -1){
        cerr<<"dynamic parallelism requires GPU devices with compute SM 3.5 or higher"<<endl
            <<"Exiting..."<<endl;
        exit(EXIT_FAILURE);
    }
    // use the first qualifying device
    hipSetDevice(device);

    cout<<"====================================="<<endl;
    cout<<"The CPU launches kernel configuration<<<2,2>>> "<<endl
        <<"on the device each thread will launch kernel<<<2,2>>>"<<endl
        <<"On the GPU we will do that recursively, until it reaches"<<endl
        <<"max_depth="<<max_depth<<endl<<endl;
    cout<<"2";
    int num_blocks = 2, sum = 2;
    for(int i=1; i<max_depth; i++){
        num_blocks *= 4;
        cout<<"+"<<num_blocks;
        sum += num_blocks;
    }
    cout<<"="<<sum<<" blocks are launched ("<<sum-2<<" from GPU)"<<endl;
    cout<<"===============================ready run"<<endl;

    // limit the CDP recursion depth
    hipDeviceSetLimit(hipLimitDevRuntimeSyncDepth, max_depth);

    // launch the kernel from the host side
    cout<<"Launching cdp_kernel with CUDA Dynamic Parallelism"<<endl;
    hipLaunchKernelGGL((cdp_kernel), dim3(2), dim3(2), 0, 0, max_depth, 0, 0, -1);
    checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipDeviceSynchronize());
    exit(EXIT_SUCCESS);
}
455410bb50cf8ad5a92d4eca614c88309ddf9628.cu
#include<iostream> #include<stdio.h> #include<helper_cuda.h> using namespace std; //所有kernel都能访问到 __device__ int g_uids = 0; __device__ void print_info(int depth, int thread, int uid, int parent_uid){ // 第0号线程 发起DP if(threadIdx.x == 0){ if(depth == 0) printf("BLOCK %d launched by the host\n",uid); else{ char buffer[32]; for(int i=0; i<depth; i++){ buffer[3*i+0] = '|'; buffer[3*i+1] = ' '; buffer[3*i+2] = ' '; } buffer[3*depth] = '\0'; printf("%s Depth[%d] BLOCK %d launched by block,thread:[%d,%d]\n", buffer, depth,uid, parent_uid,thread); } } __syncthreads(); } __global__ void cdp_kernel(int max_depth, int depth, int thread, int parent_uid){ //为了让block中thread共享,但是只有tid=0的才能set; s_uid 表示块id printf("hello world\n"); __shared__ int s_uid; if(threadIdx.x == 0){ s_uid = atomicAdd(&g_uids, 1); } __syncthreads(); print_info(depth, thread,s_uid,parent_uid); if(++depth >= max_depth) return; cdp_kernel<<<gridDim,blockDim>>>(max_depth, depth, threadIdx.x, s_uid); } int main(int argc, char const *argv[]){ cout<<"starting CUDA Dynamic Parallelism"<<endl; int max_depth = 2; if(checkCmdLineFlag(argc, argv,"help") || checkCmdLineFlag(argc, argv,"h")){ cout<<"Usage: "<<argv[0] <<" depth=<max_depth>\t(where max_depth is a value between 1 and 8)."<<endl; exit(EXIT_SUCCESS); } if(checkCmdLineFlag(argc, argv, "depth")){ max_depth = getCmdLineArgumentInt(argc, argv, "depth"); if (max_depth<1 || max_depth>8){ cout<<"depth parameter has to be between 1 and 8"<<endl; exit(EXIT_FAILURE); } } //================================ // find/set the device int device_count = 0, device = -1; checkCudaErrors(cudaGetDeviceCount(&device_count)); for(int i=0; i<device_count; i++){ cudaDeviceProp properties; checkCudaErrors(cudaGetDeviceProperties(&properties, i)); if(properties.major>3 || (properties.major == 3 && properties.minor >= 5)){ device = i; cout<<"Running on GPU:"<<device<<" ("<<properties.name<<") "<<endl; break; }else{ cout<<"ERROR: dynamic parallelism requires GPU devices with compute SM 3.5 or higher"<<endl; cout<<"Current GPU device["<<i<<"] has compute SM"<<properties.major<<"."<<properties.minor <<endl<<"Exiting..."<<endl; continue; } } if(device == -1){ cerr<<"dynamic parallelism requires GPU devices with compute SM 3.5 or higher"<<endl <<"Exiting..."<<endl; exit(EXIT_FAILURE); } //设置第一个符合的设备 cudaSetDevice(device); cout<<"====================================="<<endl; cout<<"The CPU launches kernel configuration<<<2,2>>> "<<endl <<"on the deviec each thread will launch kernel<<<2,2>>>"<<endl <<"The GPU we will do that recursively, until reaches"<<endl <<"max_depth="<<max_depth<<endl<<endl; cout<<"2"; int num_blocks = 2, sum = 2; for(int i=1; i<max_depth; i++){ num_blocks *= 4; cout<<"+"<<num_blocks; sum += num_blocks; } cout<<"="<<sum<<" blocks are launched ("<<sum-2<<" from GPU)"<<endl; cout<<"===============================ready run"<<endl; //限制CDP递归的深度 cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, max_depth); //host侧运行kernel cout<<"Launching cdp_kernel with CUDA Dynamic Parallelism"<<endl; cdp_kernel<<<2,2>>>(max_depth, 0, 0, -1); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); exit(EXIT_SUCCESS); }
e765234d855d95a66cdff31e46bbfde8d84c2823.hip
// !!! This is a file automatically generated by hipify!!! #include "cuda_defines.h" #include "cuda_stuff.h" #include "../zTileSize.h" static size_t sxsy=0; void CUDA_Initialize(const int sx, const int sy, const int sz, const int bord, float dx, float dy, float dz, float dt, float * restrict ch1dxx, float * restrict ch1dyy, float * restrict ch1dzz, float * restrict ch1dxy, float * restrict ch1dyz, float * restrict ch1dxz, float * restrict v2px, float * restrict v2pz, float * restrict v2sz, float * restrict v2pn, float * restrict vpz, float * restrict vsv, float * restrict epsilon, float * restrict delta, float * restrict phi, float * restrict theta, float * restrict pp, float * restrict pc, float * restrict qp, float * restrict qc) { extern float* dev_pDx; extern float* dev_pDy; extern float* dev_qDx; extern float* dev_qDy; extern float* dev_vpz; extern float* dev_vsv; extern float* dev_epsilon; extern float* dev_delta; extern float* dev_phi; extern float* dev_theta; extern float* dev_ch1dxx; extern float* dev_ch1dyy; extern float* dev_ch1dzz; extern float* dev_ch1dxy; extern float* dev_ch1dyz; extern float* dev_ch1dxz; extern float* dev_v2px; extern float* dev_v2pz; extern float* dev_v2sz; extern float* dev_v2pn; extern float* dev_pp; extern float* dev_pc; extern float* dev_qp; extern float* dev_qc; int deviceCount; CUDA_CALL(hipGetDeviceCount(&deviceCount)); const int device=1; hipDeviceProp_t deviceProp; CUDA_CALL(hipGetDeviceProperties(&deviceProp, device)); printf("CUDA source using device(%d) %s with compute capability %d.%d.\n", device, deviceProp.name, deviceProp.major, deviceProp.minor); CUDA_CALL(hipSetDevice(device)); // Check sx,sy values if (sx%BSIZE_X != 0) { printf("sx(%d) must be multiple of BSIZE_X(%d)\n", sx, (int)BSIZE_X); exit(1); } if (sy%BSIZE_Y != 0) { printf("sy(%d) must be multiple of BSIZE_Y(%d)\n", sy, (int)BSIZE_Y); exit(1); } sxsy=sx*sy; // one plan const size_t sxsysz=sxsy*sz; const size_t msize_vol=sxsysz*sizeof(float); const size_t msize_vol_extra=msize_vol+2*sxsy*sizeof(float); // 2 extra plans for wave fields CUDA_CALL(hipMalloc(&dev_vpz, msize_vol)); CUDA_CALL(hipMemcpy(dev_vpz, vpz, msize_vol, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&dev_vsv, msize_vol)); CUDA_CALL(hipMemcpy(dev_vsv, vsv, msize_vol, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&dev_epsilon, msize_vol)); CUDA_CALL(hipMemcpy(dev_epsilon, epsilon, msize_vol, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&dev_delta, msize_vol)); CUDA_CALL(hipMemcpy(dev_delta, delta, msize_vol, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&dev_phi, msize_vol)); CUDA_CALL(hipMemcpy(dev_phi, phi, msize_vol, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&dev_theta, msize_vol)); CUDA_CALL(hipMemcpy(dev_theta, theta, msize_vol, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&dev_ch1dxx, msize_vol)); CUDA_CALL(hipMemcpy(dev_ch1dxx, ch1dxx, msize_vol, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&dev_ch1dyy, msize_vol)); CUDA_CALL(hipMemcpy(dev_ch1dyy, ch1dyy, msize_vol, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&dev_ch1dzz, msize_vol)); CUDA_CALL(hipMemcpy(dev_ch1dzz, ch1dzz, msize_vol, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&dev_ch1dxy, msize_vol)); CUDA_CALL(hipMemcpy(dev_ch1dxy, ch1dxy, msize_vol, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&dev_ch1dyz, msize_vol)); CUDA_CALL(hipMemcpy(dev_ch1dyz, ch1dyz, msize_vol, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&dev_ch1dxz, msize_vol)); CUDA_CALL(hipMemcpy(dev_ch1dxz, ch1dxz, msize_vol, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&dev_v2px, 
msize_vol)); CUDA_CALL(hipMemcpy(dev_v2px, v2px, msize_vol, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&dev_v2pz, msize_vol)); CUDA_CALL(hipMemcpy(dev_v2pz, v2pz, msize_vol, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&dev_v2sz, msize_vol)); CUDA_CALL(hipMemcpy(dev_v2sz, v2sz, msize_vol, hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&dev_v2pn, msize_vol)); CUDA_CALL(hipMemcpy(dev_v2pn, v2pn, msize_vol, hipMemcpyHostToDevice)); // Wave field arrays with an extra plan CUDA_CALL(hipMalloc(&dev_pp, msize_vol_extra)); CUDA_CALL(hipMemset(dev_pp, 0, msize_vol_extra)); CUDA_CALL(hipMalloc(&dev_pc, msize_vol_extra)); CUDA_CALL(hipMemset(dev_pc, 0, msize_vol_extra)); CUDA_CALL(hipMalloc(&dev_qp, msize_vol_extra)); CUDA_CALL(hipMemset(dev_qp, 0, msize_vol_extra)); CUDA_CALL(hipMalloc(&dev_qc, msize_vol_extra)); CUDA_CALL(hipMemset(dev_qc, 0, msize_vol_extra)); dev_pp+=sxsy; dev_pc+=sxsy; dev_qp+=sxsy; dev_qc+=sxsy; const size_t msize_vol_tyle=sxsy*DIM_Z_TILE*sizeof(float); CUDA_CALL(hipMalloc(&dev_pDx, msize_vol_tyle)); CUDA_CALL(hipMemset(dev_pDx, 0, msize_vol_tyle)); CUDA_CALL(hipMalloc(&dev_pDy, msize_vol_tyle)); CUDA_CALL(hipMemset(dev_pDy, 0, msize_vol_tyle)); CUDA_CALL(hipMalloc(&dev_qDx, msize_vol_tyle)); CUDA_CALL(hipMemset(dev_qDx, 0, msize_vol_tyle)); CUDA_CALL(hipMalloc(&dev_qDy, msize_vol_tyle)); CUDA_CALL(hipMemset(dev_qDy, 0, msize_vol_tyle)); CUDA_CALL(hipGetLastError()); CUDA_CALL(hipDeviceSynchronize()); printf("GPU memory usage = %ld MiB\n", 21*msize_vol/1024/1024); } void CUDA_Finalize() { extern float* dev_vpz; extern float* dev_vsv; extern float* dev_epsilon; extern float* dev_delta; extern float* dev_phi; extern float* dev_theta; extern float* dev_ch1dxx; extern float* dev_ch1dyy; extern float* dev_ch1dzz; extern float* dev_ch1dxy; extern float* dev_ch1dyz; extern float* dev_ch1dxz; extern float* dev_v2px; extern float* dev_v2pz; extern float* dev_v2sz; extern float* dev_v2pn; extern float* dev_pp; extern float* dev_pc; extern float* dev_qp; extern float* dev_qc; extern float* dev_pDx; extern float* dev_pDy; extern float* dev_qDx; extern float* dev_qDy; dev_pp-=sxsy; dev_pc-=sxsy; dev_qp-=sxsy; dev_qc-=sxsy; CUDA_CALL(hipFree(dev_vpz)); CUDA_CALL(hipFree(dev_vsv)); CUDA_CALL(hipFree(dev_epsilon)); CUDA_CALL(hipFree(dev_delta)); CUDA_CALL(hipFree(dev_phi)); CUDA_CALL(hipFree(dev_theta)); CUDA_CALL(hipFree(dev_ch1dxx)); CUDA_CALL(hipFree(dev_ch1dyy)); CUDA_CALL(hipFree(dev_ch1dzz)); CUDA_CALL(hipFree(dev_ch1dxy)); CUDA_CALL(hipFree(dev_ch1dyz)); CUDA_CALL(hipFree(dev_ch1dxz)); CUDA_CALL(hipFree(dev_v2px)); CUDA_CALL(hipFree(dev_v2pz)); CUDA_CALL(hipFree(dev_v2sz)); CUDA_CALL(hipFree(dev_v2pn)); CUDA_CALL(hipFree(dev_pp)); CUDA_CALL(hipFree(dev_pc)); CUDA_CALL(hipFree(dev_qp)); CUDA_CALL(hipFree(dev_qc)); CUDA_CALL(hipFree(dev_pDx)); CUDA_CALL(hipFree(dev_qDx)); CUDA_CALL(hipFree(dev_pDy)); CUDA_CALL(hipFree(dev_qDy)); printf("CUDA_Finalize: SUCCESS\n"); } void CUDA_Update_pointers(const int sx, const int sy, const int sz, float *pc) { extern float* dev_pc; const size_t sxsysz=((size_t)sx*sy)*sz; const size_t msize_vol=sxsysz*sizeof(float); if (pc) CUDA_CALL(hipMemcpy(pc, dev_pc, msize_vol, hipMemcpyDeviceToHost)); }
e765234d855d95a66cdff31e46bbfde8d84c2823.cu
#include "cuda_defines.h" #include "cuda_stuff.h" #include "../zTileSize.h" static size_t sxsy=0; void CUDA_Initialize(const int sx, const int sy, const int sz, const int bord, float dx, float dy, float dz, float dt, float * restrict ch1dxx, float * restrict ch1dyy, float * restrict ch1dzz, float * restrict ch1dxy, float * restrict ch1dyz, float * restrict ch1dxz, float * restrict v2px, float * restrict v2pz, float * restrict v2sz, float * restrict v2pn, float * restrict vpz, float * restrict vsv, float * restrict epsilon, float * restrict delta, float * restrict phi, float * restrict theta, float * restrict pp, float * restrict pc, float * restrict qp, float * restrict qc) { extern float* dev_pDx; extern float* dev_pDy; extern float* dev_qDx; extern float* dev_qDy; extern float* dev_vpz; extern float* dev_vsv; extern float* dev_epsilon; extern float* dev_delta; extern float* dev_phi; extern float* dev_theta; extern float* dev_ch1dxx; extern float* dev_ch1dyy; extern float* dev_ch1dzz; extern float* dev_ch1dxy; extern float* dev_ch1dyz; extern float* dev_ch1dxz; extern float* dev_v2px; extern float* dev_v2pz; extern float* dev_v2sz; extern float* dev_v2pn; extern float* dev_pp; extern float* dev_pc; extern float* dev_qp; extern float* dev_qc; int deviceCount; CUDA_CALL(cudaGetDeviceCount(&deviceCount)); const int device=1; cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); printf("CUDA source using device(%d) %s with compute capability %d.%d.\n", device, deviceProp.name, deviceProp.major, deviceProp.minor); CUDA_CALL(cudaSetDevice(device)); // Check sx,sy values if (sx%BSIZE_X != 0) { printf("sx(%d) must be multiple of BSIZE_X(%d)\n", sx, (int)BSIZE_X); exit(1); } if (sy%BSIZE_Y != 0) { printf("sy(%d) must be multiple of BSIZE_Y(%d)\n", sy, (int)BSIZE_Y); exit(1); } sxsy=sx*sy; // one plan const size_t sxsysz=sxsy*sz; const size_t msize_vol=sxsysz*sizeof(float); const size_t msize_vol_extra=msize_vol+2*sxsy*sizeof(float); // 2 extra plans for wave fields CUDA_CALL(cudaMalloc(&dev_vpz, msize_vol)); CUDA_CALL(cudaMemcpy(dev_vpz, vpz, msize_vol, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&dev_vsv, msize_vol)); CUDA_CALL(cudaMemcpy(dev_vsv, vsv, msize_vol, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&dev_epsilon, msize_vol)); CUDA_CALL(cudaMemcpy(dev_epsilon, epsilon, msize_vol, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&dev_delta, msize_vol)); CUDA_CALL(cudaMemcpy(dev_delta, delta, msize_vol, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&dev_phi, msize_vol)); CUDA_CALL(cudaMemcpy(dev_phi, phi, msize_vol, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&dev_theta, msize_vol)); CUDA_CALL(cudaMemcpy(dev_theta, theta, msize_vol, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&dev_ch1dxx, msize_vol)); CUDA_CALL(cudaMemcpy(dev_ch1dxx, ch1dxx, msize_vol, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&dev_ch1dyy, msize_vol)); CUDA_CALL(cudaMemcpy(dev_ch1dyy, ch1dyy, msize_vol, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&dev_ch1dzz, msize_vol)); CUDA_CALL(cudaMemcpy(dev_ch1dzz, ch1dzz, msize_vol, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&dev_ch1dxy, msize_vol)); CUDA_CALL(cudaMemcpy(dev_ch1dxy, ch1dxy, msize_vol, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&dev_ch1dyz, msize_vol)); CUDA_CALL(cudaMemcpy(dev_ch1dyz, ch1dyz, msize_vol, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&dev_ch1dxz, msize_vol)); CUDA_CALL(cudaMemcpy(dev_ch1dxz, ch1dxz, msize_vol, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&dev_v2px, msize_vol)); 
CUDA_CALL(cudaMemcpy(dev_v2px, v2px, msize_vol, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&dev_v2pz, msize_vol)); CUDA_CALL(cudaMemcpy(dev_v2pz, v2pz, msize_vol, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&dev_v2sz, msize_vol)); CUDA_CALL(cudaMemcpy(dev_v2sz, v2sz, msize_vol, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMalloc(&dev_v2pn, msize_vol)); CUDA_CALL(cudaMemcpy(dev_v2pn, v2pn, msize_vol, cudaMemcpyHostToDevice)); // Wave field arrays with an extra plan CUDA_CALL(cudaMalloc(&dev_pp, msize_vol_extra)); CUDA_CALL(cudaMemset(dev_pp, 0, msize_vol_extra)); CUDA_CALL(cudaMalloc(&dev_pc, msize_vol_extra)); CUDA_CALL(cudaMemset(dev_pc, 0, msize_vol_extra)); CUDA_CALL(cudaMalloc(&dev_qp, msize_vol_extra)); CUDA_CALL(cudaMemset(dev_qp, 0, msize_vol_extra)); CUDA_CALL(cudaMalloc(&dev_qc, msize_vol_extra)); CUDA_CALL(cudaMemset(dev_qc, 0, msize_vol_extra)); dev_pp+=sxsy; dev_pc+=sxsy; dev_qp+=sxsy; dev_qc+=sxsy; const size_t msize_vol_tyle=sxsy*DIM_Z_TILE*sizeof(float); CUDA_CALL(cudaMalloc(&dev_pDx, msize_vol_tyle)); CUDA_CALL(cudaMemset(dev_pDx, 0, msize_vol_tyle)); CUDA_CALL(cudaMalloc(&dev_pDy, msize_vol_tyle)); CUDA_CALL(cudaMemset(dev_pDy, 0, msize_vol_tyle)); CUDA_CALL(cudaMalloc(&dev_qDx, msize_vol_tyle)); CUDA_CALL(cudaMemset(dev_qDx, 0, msize_vol_tyle)); CUDA_CALL(cudaMalloc(&dev_qDy, msize_vol_tyle)); CUDA_CALL(cudaMemset(dev_qDy, 0, msize_vol_tyle)); CUDA_CALL(cudaGetLastError()); CUDA_CALL(cudaDeviceSynchronize()); printf("GPU memory usage = %ld MiB\n", 21*msize_vol/1024/1024); } void CUDA_Finalize() { extern float* dev_vpz; extern float* dev_vsv; extern float* dev_epsilon; extern float* dev_delta; extern float* dev_phi; extern float* dev_theta; extern float* dev_ch1dxx; extern float* dev_ch1dyy; extern float* dev_ch1dzz; extern float* dev_ch1dxy; extern float* dev_ch1dyz; extern float* dev_ch1dxz; extern float* dev_v2px; extern float* dev_v2pz; extern float* dev_v2sz; extern float* dev_v2pn; extern float* dev_pp; extern float* dev_pc; extern float* dev_qp; extern float* dev_qc; extern float* dev_pDx; extern float* dev_pDy; extern float* dev_qDx; extern float* dev_qDy; dev_pp-=sxsy; dev_pc-=sxsy; dev_qp-=sxsy; dev_qc-=sxsy; CUDA_CALL(cudaFree(dev_vpz)); CUDA_CALL(cudaFree(dev_vsv)); CUDA_CALL(cudaFree(dev_epsilon)); CUDA_CALL(cudaFree(dev_delta)); CUDA_CALL(cudaFree(dev_phi)); CUDA_CALL(cudaFree(dev_theta)); CUDA_CALL(cudaFree(dev_ch1dxx)); CUDA_CALL(cudaFree(dev_ch1dyy)); CUDA_CALL(cudaFree(dev_ch1dzz)); CUDA_CALL(cudaFree(dev_ch1dxy)); CUDA_CALL(cudaFree(dev_ch1dyz)); CUDA_CALL(cudaFree(dev_ch1dxz)); CUDA_CALL(cudaFree(dev_v2px)); CUDA_CALL(cudaFree(dev_v2pz)); CUDA_CALL(cudaFree(dev_v2sz)); CUDA_CALL(cudaFree(dev_v2pn)); CUDA_CALL(cudaFree(dev_pp)); CUDA_CALL(cudaFree(dev_pc)); CUDA_CALL(cudaFree(dev_qp)); CUDA_CALL(cudaFree(dev_qc)); CUDA_CALL(cudaFree(dev_pDx)); CUDA_CALL(cudaFree(dev_qDx)); CUDA_CALL(cudaFree(dev_pDy)); CUDA_CALL(cudaFree(dev_qDy)); printf("CUDA_Finalize: SUCCESS\n"); } void CUDA_Update_pointers(const int sx, const int sy, const int sz, float *pc) { extern float* dev_pc; const size_t sxsysz=((size_t)sx*sy)*sz; const size_t msize_vol=sxsysz*sizeof(float); if (pc) CUDA_CALL(cudaMemcpy(pc, dev_pc, msize_vol, cudaMemcpyDeviceToHost)); }
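CUDA_CALL comes from cuda_defines.h, which is not part of this pair; the code above only needs it to abort on a failed runtime call. A minimal sketch of what such a macro typically looks like, offered as an assumption about that header rather than its actual contents.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Assumed shape of the CUDA_CALL error-checking wrapper used above.
#define CUDA_CALL(call)                                               \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            fprintf(stderr, "CUDA error '%s' at %s:%d\n",             \
                    cudaGetErrorString(err_), __FILE__, __LINE__);    \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)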
c90427d1d19417de9c1be67e529eab11fbee0aa2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /********************************************************************* *** *** *** Source code generated by kernel2cu.pl *** *** *** *** Please do not edit *** *** *** *********************************************************************/ #include "vglImage.h" #include "vglLoadShader.h" #include "vglContext.h" #include <iostream> //kernels /** vglCudaCopy Copy of image in cuda context. */ // <<<input->shape[VGL_HEIGHT],384>>> (IO_PBO: VglImage* input, IO_PBO: VglImage* output) // (input->cudaPtr, output->cudaPtr, input->shape[VGL_WIDTH], input->shape[VGL_HEIGHT], input->nChannels) template<typename T> __global__ void global_Copy(T* input, T* output, int w, int h, int nChannels){ int offset = blockIdx.x * nChannels * w; T* arr_in = input + offset; T* arr_out = output + offset; for (int j = threadIdx.x; j < nChannels * w; j += blockDim.x){ arr_out[j] = arr_in[j]; } } void vglCudaCopy(VglImage* input, VglImage* output){ if (!input){ printf("vglCudaCopy: Error: input parameter is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } vglCheckContext(input, VGL_CUDA_CONTEXT); if (!input->cudaPtr){ printf("vglCudaCopy: Error: input->cudaPtr is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } if (!output){ printf("vglCudaCopy: Error: output parameter is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } vglCheckContext(output, VGL_CUDA_CONTEXT); if (!output->cudaPtr){ printf("vglCudaCopy: Error: output->cudaPtr is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } switch (input->depth){ case (IPL_DEPTH_8U): hipLaunchKernelGGL(( global_Copy), dim3(input->shape[VGL_HEIGHT]),dim3(384), 0, 0, (unsigned char*)input->cudaPtr, (unsigned char*)output->cudaPtr, input->shape[VGL_WIDTH], input->shape[VGL_HEIGHT], input->nChannels); break; default: printf("vglCudaCopy: Error: unsupported img->depth = %d in file '%s' in line %i.\n", input->depth, __FILE__, __LINE__); exit(1); } vglSetContext(input, VGL_CUDA_CONTEXT); vglSetContext(output, VGL_CUDA_CONTEXT); } /** vglCudaInvert Inverts image stored in cuda context. 
*/ // <<<input->shape[VGL_HEIGHT],384>>> (IN_PBO: VglImage* input, OUT_PBO: VglImage* output) // (input->cudaPtr, output->cudaPtr, input->shape[VGL_WIDTH], input->shape[VGL_HEIGHT], input->nChannels) template<typename T> __global__ void global_Invert(T* input, T* output, int w, int h, int nChannels){ int offset = blockIdx.x * nChannels * w; T* array_in = input + offset; T* array_out = output + offset; for (int j = threadIdx.x; j < nChannels * w; j += blockDim.x){ array_out[j] = -array_in[j]; } } void vglCudaInvert(VglImage* input, VglImage* output){ if (!input){ printf("vglCudaInvert: Error: input parameter is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } vglCheckContext(input, VGL_CUDA_CONTEXT); if (!input->cudaPtr){ printf("vglCudaInvert: Error: input->cudaPtr is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } if (!output){ printf("vglCudaInvert: Error: output parameter is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } vglCheckContextForOutput(output, VGL_CUDA_CONTEXT); if (!output->cudaPtr){ printf("vglCudaInvert: Error: output->cudaPtr is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } switch (input->depth){ case (IPL_DEPTH_8U): hipLaunchKernelGGL(( global_Invert), dim3(input->shape[VGL_HEIGHT]),dim3(384), 0, 0, (unsigned char*)input->cudaPtr, (unsigned char*)output->cudaPtr, input->shape[VGL_WIDTH], input->shape[VGL_HEIGHT], input->nChannels); break; default: printf("vglCudaInvert: Error: unsupported img->depth = %d in file '%s' in line %i.\n", input->depth, __FILE__, __LINE__); exit(1); } vglSetContext(output, VGL_CUDA_CONTEXT); } /** vglCudaInvertOnPlace Inverts image, stored in cuda context, on place. */ // <<<input->shape[VGL_HEIGHT],384>>> (IO_PBO: VglImage* input) // (input->cudaPtr, input->shape[VGL_WIDTH], input->shape[VGL_HEIGHT], input->nChannels) template<typename T> __global__ void global_InvertOnPlace(T* input, int w, int h, int nChannels){ T* array = input + blockIdx.x * nChannels * w; for (int j = threadIdx.x; j < nChannels * w; j += blockDim.x){ array[j] = -array[j]; } } void vglCudaInvertOnPlace(VglImage* input){ if (!input){ printf("vglCudaInvertOnPlace: Error: input parameter is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } vglCheckContext(input, VGL_CUDA_CONTEXT); if (!input->cudaPtr){ printf("vglCudaInvertOnPlace: Error: input->cudaPtr is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } switch (input->depth){ case (IPL_DEPTH_8U): hipLaunchKernelGGL(( global_InvertOnPlace), dim3(input->shape[VGL_HEIGHT]),dim3(384), 0, 0, (unsigned char*)input->cudaPtr, input->shape[VGL_WIDTH], input->shape[VGL_HEIGHT], input->nChannels); break; default: printf("vglCudaInvertOnPlace: Error: unsupported img->depth = %d in file '%s' in line %i.\n", input->depth, __FILE__, __LINE__); exit(1); } vglSetContext(input, VGL_CUDA_CONTEXT); }
c90427d1d19417de9c1be67e529eab11fbee0aa2.cu
/********************************************************************* *** *** *** Source code generated by kernel2cu.pl *** *** *** *** Please do not edit *** *** *** *********************************************************************/ #include "vglImage.h" #include "vglLoadShader.h" #include "vglContext.h" #include <iostream> //kernels /** vglCudaCopy Copy of image in cuda context. */ // <<<input->shape[VGL_HEIGHT],384>>> (IO_PBO: VglImage* input, IO_PBO: VglImage* output) // (input->cudaPtr, output->cudaPtr, input->shape[VGL_WIDTH], input->shape[VGL_HEIGHT], input->nChannels) template<typename T> __global__ void global_Copy(T* input, T* output, int w, int h, int nChannels){ int offset = blockIdx.x * nChannels * w; T* arr_in = input + offset; T* arr_out = output + offset; for (int j = threadIdx.x; j < nChannels * w; j += blockDim.x){ arr_out[j] = arr_in[j]; } } void vglCudaCopy(VglImage* input, VglImage* output){ if (!input){ printf("vglCudaCopy: Error: input parameter is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } vglCheckContext(input, VGL_CUDA_CONTEXT); if (!input->cudaPtr){ printf("vglCudaCopy: Error: input->cudaPtr is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } if (!output){ printf("vglCudaCopy: Error: output parameter is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } vglCheckContext(output, VGL_CUDA_CONTEXT); if (!output->cudaPtr){ printf("vglCudaCopy: Error: output->cudaPtr is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } switch (input->depth){ case (IPL_DEPTH_8U): global_Copy<<<input->shape[VGL_HEIGHT],384>>>((unsigned char*)input->cudaPtr, (unsigned char*)output->cudaPtr, input->shape[VGL_WIDTH], input->shape[VGL_HEIGHT], input->nChannels); break; default: printf("vglCudaCopy: Error: unsupported img->depth = %d in file '%s' in line %i.\n", input->depth, __FILE__, __LINE__); exit(1); } vglSetContext(input, VGL_CUDA_CONTEXT); vglSetContext(output, VGL_CUDA_CONTEXT); } /** vglCudaInvert Inverts image stored in cuda context. 
*/ // <<<input->shape[VGL_HEIGHT],384>>> (IN_PBO: VglImage* input, OUT_PBO: VglImage* output) // (input->cudaPtr, output->cudaPtr, input->shape[VGL_WIDTH], input->shape[VGL_HEIGHT], input->nChannels) template<typename T> __global__ void global_Invert(T* input, T* output, int w, int h, int nChannels){ int offset = blockIdx.x * nChannels * w; T* array_in = input + offset; T* array_out = output + offset; for (int j = threadIdx.x; j < nChannels * w; j += blockDim.x){ array_out[j] = -array_in[j]; } } void vglCudaInvert(VglImage* input, VglImage* output){ if (!input){ printf("vglCudaInvert: Error: input parameter is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } vglCheckContext(input, VGL_CUDA_CONTEXT); if (!input->cudaPtr){ printf("vglCudaInvert: Error: input->cudaPtr is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } if (!output){ printf("vglCudaInvert: Error: output parameter is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } vglCheckContextForOutput(output, VGL_CUDA_CONTEXT); if (!output->cudaPtr){ printf("vglCudaInvert: Error: output->cudaPtr is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } switch (input->depth){ case (IPL_DEPTH_8U): global_Invert<<<input->shape[VGL_HEIGHT],384>>>((unsigned char*)input->cudaPtr, (unsigned char*)output->cudaPtr, input->shape[VGL_WIDTH], input->shape[VGL_HEIGHT], input->nChannels); break; default: printf("vglCudaInvert: Error: unsupported img->depth = %d in file '%s' in line %i.\n", input->depth, __FILE__, __LINE__); exit(1); } vglSetContext(output, VGL_CUDA_CONTEXT); } /** vglCudaInvertOnPlace Inverts image, stored in cuda context, on place. */ // <<<input->shape[VGL_HEIGHT],384>>> (IO_PBO: VglImage* input) // (input->cudaPtr, input->shape[VGL_WIDTH], input->shape[VGL_HEIGHT], input->nChannels) template<typename T> __global__ void global_InvertOnPlace(T* input, int w, int h, int nChannels){ T* array = input + blockIdx.x * nChannels * w; for (int j = threadIdx.x; j < nChannels * w; j += blockDim.x){ array[j] = -array[j]; } } void vglCudaInvertOnPlace(VglImage* input){ if (!input){ printf("vglCudaInvertOnPlace: Error: input parameter is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } vglCheckContext(input, VGL_CUDA_CONTEXT); if (!input->cudaPtr){ printf("vglCudaInvertOnPlace: Error: input->cudaPtr is null in file '%s' in line %i.\n", __FILE__, __LINE__); exit(1); } switch (input->depth){ case (IPL_DEPTH_8U): global_InvertOnPlace<<<input->shape[VGL_HEIGHT],384>>>((unsigned char*)input->cudaPtr, input->shape[VGL_WIDTH], input->shape[VGL_HEIGHT], input->nChannels); break; default: printf("vglCudaInvertOnPlace: Error: unsupported img->depth = %d in file '%s' in line %i.\n", input->depth, __FILE__, __LINE__); exit(1); } vglSetContext(input, VGL_CUDA_CONTEXT); }
92c4b1148f98f131d14f591425c995981bafb22d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <ATen/AccumulateType.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/Exceptions.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "fbgemm_gpu/fbgemm_cuda_utils.cuh" #include "fbgemm_gpu/layout_transform_ops.cuh" #include "fbgemm_gpu/permute_pooled_embedding_ops.h" namespace fbgemm { at::Tensor permute_pooled_embs_gpu( const at::Tensor& pooled_embs, // [B_local][Sum_T_global(D)] const at::Tensor& offset_dim_list, const at::Tensor& permute_list, const at::Tensor& inv_offset_dim_list, const at::Tensor& inv_permute_list) { at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard; device_guard.set_index(pooled_embs.get_device()); // We couldn't pass the "pooled_embs.is_contiguous()" check in the backward // passs after D22767058. TODO: optimize and make sure pooled_embs is // contiguous. auto pooled_embs_contiguous = pooled_embs.contiguous(); const int64_t B = pooled_embs_contiguous.size(0); const int64_t T = permute_list.numel(); const int64_t dim_sum = pooled_embs_contiguous.size(1); // inv_permute_list is not being used so it's not checked here. TORCH_CHECK(pooled_embs_contiguous.device() == offset_dim_list.device()); TORCH_CHECK(pooled_embs_contiguous.device() == permute_list.device()); TORCH_CHECK(pooled_embs_contiguous.device() == inv_offset_dim_list.device()); TORCH_CHECK(offset_dim_list.numel() == permute_list.numel() + 1); TORCH_CHECK(offset_dim_list.numel() == inv_offset_dim_list.numel()); at::Tensor permuted_pooled_embs = at::empty_like(pooled_embs_contiguous); // This kernel is moving D elements per warp. // We are launching ( div_round_up(T, warp_per_block), B ) blocks. // The grid z dimension is also used by B in case it's greater than 65535. const int32_t warp_per_block = fbgemm_gpu::kMaxThreads / fbgemm_gpu::kWarpSize; const int32_t max_grid_dim_y = 32768; // The CUDA maximum is 65535, not a power of 2. const dim3 threads(fbgemm_gpu::kMaxThreads); const dim3 blocks( fbgemm_gpu::div_round_up(T, warp_per_block), ::min(static_cast<int32_t>(B), max_grid_dim_y), (B + max_grid_dim_y - 1) / max_grid_dim_y); AT_DISPATCH_FLOATING_TYPES_AND_HALF( pooled_embs_contiguous.type(), "permute_pooled_embeddings", ([&] { hipLaunchKernelGGL(( permute_pooled_embs_kernel<scalar_t>) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), pooled_embs_contiguous.data_ptr<scalar_t>(), offset_dim_list.data_ptr<int64_t>(), permute_list.data_ptr<int64_t>(), inv_offset_dim_list.data_ptr<int64_t>(), permuted_pooled_embs.data_ptr<scalar_t>(), B, T, dim_sum); C10_HIP_KERNEL_LAUNCH_CHECK(); })); return permuted_pooled_embs; } } // namespace fbgemm
92c4b1148f98f131d14f591425c995981bafb22d.cu
/* * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/Exceptions.h> #include <c10/cuda/CUDAGuard.h> #include <cuda.h> #include <cuda_runtime.h> #include "fbgemm_gpu/fbgemm_cuda_utils.cuh" #include "fbgemm_gpu/layout_transform_ops.cuh" #include "fbgemm_gpu/permute_pooled_embedding_ops.h" namespace fbgemm { at::Tensor permute_pooled_embs_gpu( const at::Tensor& pooled_embs, // [B_local][Sum_T_global(D)] const at::Tensor& offset_dim_list, const at::Tensor& permute_list, const at::Tensor& inv_offset_dim_list, const at::Tensor& inv_permute_list) { at::cuda::OptionalCUDAGuard device_guard; device_guard.set_index(pooled_embs.get_device()); // We couldn't pass the "pooled_embs.is_contiguous()" check in the backward // passs after D22767058. TODO: optimize and make sure pooled_embs is // contiguous. auto pooled_embs_contiguous = pooled_embs.contiguous(); const int64_t B = pooled_embs_contiguous.size(0); const int64_t T = permute_list.numel(); const int64_t dim_sum = pooled_embs_contiguous.size(1); // inv_permute_list is not being used so it's not checked here. TORCH_CHECK(pooled_embs_contiguous.device() == offset_dim_list.device()); TORCH_CHECK(pooled_embs_contiguous.device() == permute_list.device()); TORCH_CHECK(pooled_embs_contiguous.device() == inv_offset_dim_list.device()); TORCH_CHECK(offset_dim_list.numel() == permute_list.numel() + 1); TORCH_CHECK(offset_dim_list.numel() == inv_offset_dim_list.numel()); at::Tensor permuted_pooled_embs = at::empty_like(pooled_embs_contiguous); // This kernel is moving D elements per warp. // We are launching ( div_round_up(T, warp_per_block), B ) blocks. // The grid z dimension is also used by B in case it's greater than 65535. const int32_t warp_per_block = fbgemm_gpu::kMaxThreads / fbgemm_gpu::kWarpSize; const int32_t max_grid_dim_y = 32768; // The CUDA maximum is 65535, not a power of 2. const dim3 threads(fbgemm_gpu::kMaxThreads); const dim3 blocks( fbgemm_gpu::div_round_up(T, warp_per_block), std::min(static_cast<int32_t>(B), max_grid_dim_y), (B + max_grid_dim_y - 1) / max_grid_dim_y); AT_DISPATCH_FLOATING_TYPES_AND_HALF( pooled_embs_contiguous.type(), "permute_pooled_embeddings", ([&] { permute_pooled_embs_kernel<scalar_t> <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( pooled_embs_contiguous.data_ptr<scalar_t>(), offset_dim_list.data_ptr<int64_t>(), permute_list.data_ptr<int64_t>(), inv_offset_dim_list.data_ptr<int64_t>(), permuted_pooled_embs.data_ptr<scalar_t>(), B, T, dim_sum); C10_CUDA_KERNEL_LAUNCH_CHECK(); })); return permuted_pooled_embs; } } // namespace fbgemm
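The launch configuration above spreads the batch dimension B over grid.y and, once B exceeds the chosen cap of 32768, over grid.z as well. The following sketch reproduces that grid computation as a standalone helper; the helper name is an assumption for illustration and is not an fbgemm API.

#include <algorithm>
#include <cstdint>
#include <cuda_runtime.h>

// Sketch: the block-grid shape chosen by permute_pooled_embs_gpu above.
dim3 make_permute_grid(int64_t T, int64_t B, int32_t warp_per_block,
                       int32_t max_grid_dim_y = 32768)
{
    const unsigned gx = static_cast<unsigned>((T + warp_per_block - 1) / warp_per_block); // div_round_up(T, warps per block)
    const unsigned gy = static_cast<unsigned>(std::min<int64_t>(B, max_grid_dim_y));      // batch rows handled per z-slice
    const unsigned gz = static_cast<unsigned>((B + max_grid_dim_y - 1) / max_grid_dim_y); // remaining batch slices
    return dim3(gx, gy, gz);
}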
79e7af324c096ec0559c1fd591a42ec589fcbe3c.hip
// !!! This is a file automatically generated by hipify!!! #include "monolithic.h" long int SH_MEM_SZ; // prints error number and exits if error is detected void cu_error_check(hipError_t error) { if (error) { printf("Error #%d occured. Exiting. \n", error); exit(error); } } // stringfy the dim3 value string str_dim3(int *dim_3) { string first = to_string(dim_3[0]); string second = to_string(dim_3[1]); string third = to_string(dim_3[2]); return "( " + first + ", " + second + ", " + third + " )"; } void save_dev_prop(hipDeviceProp_t dev_prop, bool force_create) { string file_name = dev_prop.name + (string)"_properites"; string path_to_file = (string)"./device_properties/" + file_name; if ( !file_exists(path_to_file) || force_create) { ofstream dev_prop_file; dev_prop_file.open(path_to_file); dev_prop_file << "Device Name = " << dev_prop.name << endl; dev_prop_file << "Clock rate (MHz) = " << dev_prop.clockRate / 1024UL << endl; dev_prop_file << "MP count = " << dev_prop.multiProcessorCount << endl; dev_prop_file << "Max blocks per MP = " << dev_prop.maxBlocksPerMultiProcessor << endl; dev_prop_file << "Registers per MP = " << dev_prop.regsPerMultiprocessor << endl; dev_prop_file << "Registers per block = " << dev_prop.regsPerBlock << endl; dev_prop_file << "Max threads per MP = " << dev_prop.maxThreadsPerMultiProcessor << endl; dev_prop_file << "Max threads per block = " << dev_prop.maxThreadsPerBlock << endl; dev_prop_file << "Max thread dims per block " << str_dim3(dev_prop.maxThreadsDim) << endl; dev_prop_file << "Max Grid Size: " << str_dim3(dev_prop.maxGridSize) << endl; dev_prop_file << "Memory Clock Rate (MHz) = " << dev_prop.memoryClockRate / 1000 << endl; dev_prop_file << "Warp Size = " << dev_prop.warpSize << endl; dev_prop_file << "Shared Memory per MP (KB) = " << dev_prop.sharedMemPerMultiprocessor / 1024UL << endl; dev_prop_file << "Shared Memory per Block (KB) = " << dev_prop.sharedMemPerBlock / 1024UL << endl; dev_prop_file << "L2 Cache Size (KB) = " << dev_prop.l2CacheSize / 1024UL << endl; dev_prop_file << "Global Memory Size (GB) = " << (long int) (dev_prop.totalGlobalMem / 1e9) << endl; dev_prop_file.close(); } } // init cuda and check for possible errors/compatiblility issues int init(bool force_create) { // printf("Initializing CUDA...\n"); hipInit(0); int dev_count; cu_error_check(hipGetDeviceCount(&dev_count)); if (dev_count == 0) { printf("There are no devices that support CUDA.\n"); exit (0); } // get handle for device 0 hipDevice_t dev; cu_error_check(hipDeviceGet(&dev, 0)); // get dev 0 properties hipDeviceProp_t dev_prop; cu_error_check((hipError_t) hipGetDeviceProperties(&dev_prop, dev)); // ensure device arch is volta or higher if (dev_prop.major < 7) { printf("cudaTensorCoreGemm requires SM 7.0 or higher to use Tensor Cores. Exiting...\n"); exit(-1); } SH_MEM_SZ = dev_prop.sharedMemPerMultiprocessor; save_dev_prop(dev_prop, force_create); // printf("Initialization is complete.\n\n\n"); return dev_prop.multiProcessorCount; }
79e7af324c096ec0559c1fd591a42ec589fcbe3c.cu
#include "monolithic.h" long int SH_MEM_SZ; // prints error number and exits if error is detected void cu_error_check(CUresult error) { if (error) { printf("Error #%d occured. Exiting. \n", error); exit(error); } } // stringfy the dim3 value string str_dim3(int *dim_3) { string first = to_string(dim_3[0]); string second = to_string(dim_3[1]); string third = to_string(dim_3[2]); return "( " + first + ", " + second + ", " + third + " )"; } void save_dev_prop(cudaDeviceProp dev_prop, bool force_create) { string file_name = dev_prop.name + (string)"_properites"; string path_to_file = (string)"./device_properties/" + file_name; if ( !file_exists(path_to_file) || force_create) { ofstream dev_prop_file; dev_prop_file.open(path_to_file); dev_prop_file << "Device Name = " << dev_prop.name << endl; dev_prop_file << "Clock rate (MHz) = " << dev_prop.clockRate / 1024UL << endl; dev_prop_file << "MP count = " << dev_prop.multiProcessorCount << endl; dev_prop_file << "Max blocks per MP = " << dev_prop.maxBlocksPerMultiProcessor << endl; dev_prop_file << "Registers per MP = " << dev_prop.regsPerMultiprocessor << endl; dev_prop_file << "Registers per block = " << dev_prop.regsPerBlock << endl; dev_prop_file << "Max threads per MP = " << dev_prop.maxThreadsPerMultiProcessor << endl; dev_prop_file << "Max threads per block = " << dev_prop.maxThreadsPerBlock << endl; dev_prop_file << "Max thread dims per block " << str_dim3(dev_prop.maxThreadsDim) << endl; dev_prop_file << "Max Grid Size: " << str_dim3(dev_prop.maxGridSize) << endl; dev_prop_file << "Memory Clock Rate (MHz) = " << dev_prop.memoryClockRate / 1000 << endl; dev_prop_file << "Warp Size = " << dev_prop.warpSize << endl; dev_prop_file << "Shared Memory per MP (KB) = " << dev_prop.sharedMemPerMultiprocessor / 1024UL << endl; dev_prop_file << "Shared Memory per Block (KB) = " << dev_prop.sharedMemPerBlock / 1024UL << endl; dev_prop_file << "L2 Cache Size (KB) = " << dev_prop.l2CacheSize / 1024UL << endl; dev_prop_file << "Global Memory Size (GB) = " << (long int) (dev_prop.totalGlobalMem / 1e9) << endl; dev_prop_file.close(); } } // init cuda and check for possible errors/compatiblility issues int init(bool force_create) { // printf("Initializing CUDA...\n"); cuInit(0); int dev_count; cu_error_check(cuDeviceGetCount(&dev_count)); if (dev_count == 0) { printf("There are no devices that support CUDA.\n"); exit (0); } // get handle for device 0 CUdevice dev; cu_error_check(cuDeviceGet(&dev, 0)); // get dev 0 properties cudaDeviceProp dev_prop; cu_error_check((CUresult) cudaGetDeviceProperties(&dev_prop, dev)); // ensure device arch is volta or higher if (dev_prop.major < 7) { printf("cudaTensorCoreGemm requires SM 7.0 or higher to use Tensor Cores. Exiting...\n"); exit(-1); } SH_MEM_SZ = dev_prop.sharedMemPerMultiprocessor; save_dev_prop(dev_prop, force_create); // printf("Initialization is complete.\n\n\n"); return dev_prop.multiProcessorCount; }
365e3e10d014eaae4c8d346a74d844914d1bef7e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file spatial_transformer.cu * \brief * \author Wei Wu */ #include "./spatial_transformer-inl.h" #include <algorithm> #if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5 #include "./cudnn_spatial_transformer-inl.h" #endif // MXNET_USE_CUDNN && CUDNN_MAJOR namespace mshadow { template<typename DType> __device__ bool between(DType value, int lowerBound, int upperBound) { return (value >= lowerBound && value <= upperBound); } template<typename DType> __global__ void /* * In order to not generate the code that uses too many * registers (resulting in too many resources requested * error) we need to tell the compiler that we will be * launching this kernel with cuda::kMaxThreadsPerBlock * threads per block. Setting __launch_bounds__ ensures * that such configuration can always be launched. */ __launch_bounds__(cuda::kMaxThreadsPerBlock, 1) BilinearSamplingForwardKernel(const int i_c, const int i_h, const int i_w, const DType* data, const DType* grid, const int o_n, const int o_c, const int o_h, const int o_w, DType* out) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < o_n * o_c * o_h * o_w; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) is the element in out int w = index % o_w; int h = (index / o_w) % o_h; int c = (index / o_w / o_h) % o_c; int n = index / o_w / o_h / o_c; index_t out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w; index_t grid_index = n * o_h * o_w * 2 + h * o_w + w; DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h - 1) / 2; DType x_real = (*(grid + grid_index) + 1) * (i_w - 1) / 2; int top_left_y = static_cast<int>(floor(y_real)); int top_left_x = static_cast<int>(floor(x_real)); DType top_left_y_w = 1.0 - (y_real - top_left_y); DType top_left_x_w = 1.0 - (x_real - top_left_x); int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x; DType top_left_v = 0; DType top_right_v = 0; DType bottom_left_v = 0; DType bottom_right_v = 0; if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) top_left_v = *(data + data_index); if (between(top_left_x + 1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) top_right_v = *(data + data_index + 1); if (between(top_left_x, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1)) bottom_left_v = *(data + data_index + i_w); if (between(top_left_x+1, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1)) bottom_right_v = *(data + data_index + i_w + 1); *(out+out_index) = top_left_v * top_left_y_w * top_left_x_w + top_right_v * top_left_y_w * (1.0 - top_left_x_w) + bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w + bottom_right_v * (1.0 - 
top_left_y_w) * (1.0 - top_left_x_w); } } /* * In order to not generate the code that uses too many * registers (resulting in too many resources requested * error) we need to tell the compiler that we will be * launching this kernel with cuda::kMaxThreadsPerBlock * threads per block. Setting __launch_bounds__ ensures * that such configuration can always be launched. */ template<typename DType> __global__ void __launch_bounds__(cuda::kMaxThreadsPerBlock, 1) BilinearSamplingBackwardKernel(const int i_c, const int i_h, const int i_w, const DType* grad, const DType* data, const int o_n, const int o_c, const int o_h, const int o_w, DType* g_input, DType* grid_src) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < o_n * o_h * o_w; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) is the element in grad int w = index % o_w; int h = (index / o_w) % o_h; int n = index / o_w / o_h; DType top_left_y_gw = 0.0; DType top_left_x_gw = 0.0; index_t grid_src_index = n * o_h * o_w * 2 + h * o_w + w; DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h - 1) / 2; DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w - 1) / 2; int top_left_y = static_cast<int>(floor(y_real)); int top_left_x = static_cast<int>(floor(x_real)); DType top_left_y_w = 1.0 - (y_real - top_left_y); DType top_left_x_w = 1.0 - (x_real - top_left_x); for (index_t c = 0; c < o_c; ++c) { index_t grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w; int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x; // calc 4 vertex value in input data DType top_left_v = 0; DType top_right_v = 0; DType bottom_left_v = 0; DType bottom_right_v = 0; // calc input grad if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) { atomicAdd((g_input + data_index), *(grad + grad_index) * top_left_y_w * top_left_x_w); top_left_v = *(data + data_index); } if (between(top_left_x+1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) { atomicAdd((g_input + data_index + 1), *(grad + grad_index) * top_left_y_w * (1.0 - top_left_x_w)); top_right_v = *(data + data_index + 1); } if (between(top_left_x, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) { atomicAdd((g_input + data_index + i_w), *(grad + grad_index) * (1.0 - top_left_y_w) * top_left_x_w); bottom_left_v = *(data + data_index + i_w); } if (between(top_left_x+1, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) { atomicAdd((g_input + data_index + i_w + 1), *(grad + grad_index) * (1.0 - top_left_y_w) * (1.0 - top_left_x_w)); bottom_right_v = *(data + data_index + i_w + 1); } // calc weight grad of top_left_w, then multiple -1 is the grad of grid_src top_left_y_gw -= *(grad + grad_index) * (top_right_v - bottom_right_v + (top_left_v - top_right_v - bottom_left_v + bottom_right_v) * top_left_x_w); top_left_x_gw -= *(grad + grad_index) * (bottom_left_v - bottom_right_v + (top_left_v - top_right_v - bottom_left_v + bottom_right_v) * top_left_y_w); } // calc grid_src grad *(grid_src + grid_src_index + o_h * o_w) = top_left_y_gw * (i_h - 1) / 2; *(grid_src + grid_src_index) = top_left_x_gw * (i_w - 1) / 2; } } template<typename DType> inline void BilinearSamplingForward(const Tensor<gpu, 4, DType> &output, const Tensor<gpu, 4, DType> &input, const Tensor<gpu, 3, DType> grid_src) { DType *out = output.dptr_; const DType *data = input.dptr_; const DType *grid = grid_src.dptr_; int o_n = output.size(0), o_c = output.size(1), o_h = output.size(2), o_w = output.size(3); int i_c = input.size(1), i_h = 
input.size(2), i_w = input.size(3); using namespace cuda; const int max_block = (output.shape_.Size() + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 num_blocks(kMaxGridDim, (max_block + kMaxGridDim - 1) / kMaxGridDim); dim3 threads_per_block(kMaxThreadsPerBlock); CheckLaunchParam(num_blocks, threads_per_block, "spatial transformer forward"); hipStream_t stream = Stream<gpu>::GetStream(output.stream_); BilinearSamplingForwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >( i_c, i_h, i_w, data, grid, o_n, o_c, o_h, o_w, out); MSHADOW_CUDA_POST_KERNEL_CHECK(BilinearSamplingForwardKernel); } template<typename DType> inline void BilinearSamplingBackward(const Tensor<gpu, 4, DType> &input_grad, const Tensor<gpu, 3, DType> &grid_src_data, const Tensor<gpu, 4, DType> &output_grad, const Tensor<gpu, 4, DType> &input_data) { DType *g_input = input_grad.dptr_; DType *grid_src = grid_src_data.dptr_; const DType *grad = output_grad.dptr_; const DType *data = input_data.dptr_; int o_n = output_grad.size(0), o_c = output_grad.size(1), o_h = output_grad.size(2), o_w = output_grad.size(3); int i_c = input_data.size(1), i_h = input_data.size(2), i_w = input_data.size(3); using namespace cuda; const int max_block = (output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 num_blocks(kMaxGridDim, (max_block + kMaxGridDim - 1) / kMaxGridDim); dim3 threads_per_block(kMaxThreadsPerBlock); CheckLaunchParam(num_blocks, threads_per_block, "spatial transformer backward"); hipStream_t stream = Stream<gpu>::GetStream(input_grad.stream_); BilinearSamplingBackwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >( i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src); MSHADOW_CUDA_POST_KERNEL_CHECK(BilinearSamplingBackwardKernel); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(SpatialTransformerParam param, int dtype) { Operator *op = NULL; #if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5 MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { if (param.cudnn_off.has_value() && param.cudnn_off.value()) { op = new SpatialTransformerOp<gpu, DType>(param); } else { op = new CuDNNSpatialTransformerOp<DType>(param); } }) #else MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new SpatialTransformerOp<gpu, DType>(param); }) #endif // MXNET_USE_CUDNN && CUDNN_MAJOR return op; } } // namespace op } // namespace mxnet
365e3e10d014eaae4c8d346a74d844914d1bef7e.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file spatial_transformer.cu * \brief * \author Wei Wu */ #include "./spatial_transformer-inl.h" #include <algorithm> #if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5 #include "./cudnn_spatial_transformer-inl.h" #endif // MXNET_USE_CUDNN && CUDNN_MAJOR namespace mshadow { template<typename DType> __device__ bool between(DType value, int lowerBound, int upperBound) { return (value >= lowerBound && value <= upperBound); } template<typename DType> __global__ void /* * In order to not generate the code that uses too many * registers (resulting in too many resources requested * error) we need to tell the compiler that we will be * launching this kernel with cuda::kMaxThreadsPerBlock * threads per block. Setting __launch_bounds__ ensures * that such configuration can always be launched. */ __launch_bounds__(cuda::kMaxThreadsPerBlock, 1) BilinearSamplingForwardKernel(const int i_c, const int i_h, const int i_w, const DType* data, const DType* grid, const int o_n, const int o_c, const int o_h, const int o_w, DType* out) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < o_n * o_c * o_h * o_w; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) is the element in out int w = index % o_w; int h = (index / o_w) % o_h; int c = (index / o_w / o_h) % o_c; int n = index / o_w / o_h / o_c; index_t out_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w; index_t grid_index = n * o_h * o_w * 2 + h * o_w + w; DType y_real = (*(grid + grid_index + o_h * o_w) + 1) * (i_h - 1) / 2; DType x_real = (*(grid + grid_index) + 1) * (i_w - 1) / 2; int top_left_y = static_cast<int>(floor(y_real)); int top_left_x = static_cast<int>(floor(x_real)); DType top_left_y_w = 1.0 - (y_real - top_left_y); DType top_left_x_w = 1.0 - (x_real - top_left_x); int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x; DType top_left_v = 0; DType top_right_v = 0; DType bottom_left_v = 0; DType bottom_right_v = 0; if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) top_left_v = *(data + data_index); if (between(top_left_x + 1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) top_right_v = *(data + data_index + 1); if (between(top_left_x, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1)) bottom_left_v = *(data + data_index + i_w); if (between(top_left_x+1, 0, i_w-1) && between(top_left_y + 1, 0, i_h-1)) bottom_right_v = *(data + data_index + i_w + 1); *(out+out_index) = top_left_v * top_left_y_w * top_left_x_w + top_right_v * top_left_y_w * (1.0 - top_left_x_w) + bottom_left_v * (1.0 - top_left_y_w) * top_left_x_w + bottom_right_v * (1.0 - top_left_y_w) * (1.0 - top_left_x_w); } } /* * In order to not generate the code that uses too many 
* registers (resulting in too many resources requested * error) we need to tell the compiler that we will be * launching this kernel with cuda::kMaxThreadsPerBlock * threads per block. Setting __launch_bounds__ ensures * that such configuration can always be launched. */ template<typename DType> __global__ void __launch_bounds__(cuda::kMaxThreadsPerBlock, 1) BilinearSamplingBackwardKernel(const int i_c, const int i_h, const int i_w, const DType* grad, const DType* data, const int o_n, const int o_c, const int o_h, const int o_w, DType* g_input, DType* grid_src) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < o_n * o_h * o_w; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) is the element in grad int w = index % o_w; int h = (index / o_w) % o_h; int n = index / o_w / o_h; DType top_left_y_gw = 0.0; DType top_left_x_gw = 0.0; index_t grid_src_index = n * o_h * o_w * 2 + h * o_w + w; DType y_real = (*(grid_src + grid_src_index + o_h * o_w) + 1) * (i_h - 1) / 2; DType x_real = (*(grid_src + grid_src_index) + 1) * (i_w - 1) / 2; int top_left_y = static_cast<int>(floor(y_real)); int top_left_x = static_cast<int>(floor(x_real)); DType top_left_y_w = 1.0 - (y_real - top_left_y); DType top_left_x_w = 1.0 - (x_real - top_left_x); for (index_t c = 0; c < o_c; ++c) { index_t grad_index = n * o_c * o_h * o_w + c * o_h * o_w + h * o_w + w; int data_index = n * i_c * i_h * i_w + c * i_h * i_w + top_left_y * i_w + top_left_x; // calc 4 vertex value in input data DType top_left_v = 0; DType top_right_v = 0; DType bottom_left_v = 0; DType bottom_right_v = 0; // calc input grad if (between(top_left_x, 0, i_w-1) && between(top_left_y, 0, i_h-1)) { atomicAdd((g_input + data_index), *(grad + grad_index) * top_left_y_w * top_left_x_w); top_left_v = *(data + data_index); } if (between(top_left_x+1, 0, i_w-1) && between(top_left_y, 0, i_h-1)) { atomicAdd((g_input + data_index + 1), *(grad + grad_index) * top_left_y_w * (1.0 - top_left_x_w)); top_right_v = *(data + data_index + 1); } if (between(top_left_x, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) { atomicAdd((g_input + data_index + i_w), *(grad + grad_index) * (1.0 - top_left_y_w) * top_left_x_w); bottom_left_v = *(data + data_index + i_w); } if (between(top_left_x+1, 0, i_w-1) && between(top_left_y+1, 0, i_h-1)) { atomicAdd((g_input + data_index + i_w + 1), *(grad + grad_index) * (1.0 - top_left_y_w) * (1.0 - top_left_x_w)); bottom_right_v = *(data + data_index + i_w + 1); } // calc weight grad of top_left_w, then multiple -1 is the grad of grid_src top_left_y_gw -= *(grad + grad_index) * (top_right_v - bottom_right_v + (top_left_v - top_right_v - bottom_left_v + bottom_right_v) * top_left_x_w); top_left_x_gw -= *(grad + grad_index) * (bottom_left_v - bottom_right_v + (top_left_v - top_right_v - bottom_left_v + bottom_right_v) * top_left_y_w); } // calc grid_src grad *(grid_src + grid_src_index + o_h * o_w) = top_left_y_gw * (i_h - 1) / 2; *(grid_src + grid_src_index) = top_left_x_gw * (i_w - 1) / 2; } } template<typename DType> inline void BilinearSamplingForward(const Tensor<gpu, 4, DType> &output, const Tensor<gpu, 4, DType> &input, const Tensor<gpu, 3, DType> grid_src) { DType *out = output.dptr_; const DType *data = input.dptr_; const DType *grid = grid_src.dptr_; int o_n = output.size(0), o_c = output.size(1), o_h = output.size(2), o_w = output.size(3); int i_c = input.size(1), i_h = input.size(2), i_w = input.size(3); using namespace cuda; const int max_block = (output.shape_.Size() + 
kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 num_blocks(kMaxGridDim, (max_block + kMaxGridDim - 1) / kMaxGridDim); dim3 threads_per_block(kMaxThreadsPerBlock); CheckLaunchParam(num_blocks, threads_per_block, "spatial transformer forward"); cudaStream_t stream = Stream<gpu>::GetStream(output.stream_); BilinearSamplingForwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >( i_c, i_h, i_w, data, grid, o_n, o_c, o_h, o_w, out); MSHADOW_CUDA_POST_KERNEL_CHECK(BilinearSamplingForwardKernel); } template<typename DType> inline void BilinearSamplingBackward(const Tensor<gpu, 4, DType> &input_grad, const Tensor<gpu, 3, DType> &grid_src_data, const Tensor<gpu, 4, DType> &output_grad, const Tensor<gpu, 4, DType> &input_data) { DType *g_input = input_grad.dptr_; DType *grid_src = grid_src_data.dptr_; const DType *grad = output_grad.dptr_; const DType *data = input_data.dptr_; int o_n = output_grad.size(0), o_c = output_grad.size(1), o_h = output_grad.size(2), o_w = output_grad.size(3); int i_c = input_data.size(1), i_h = input_data.size(2), i_w = input_data.size(3); using namespace cuda; const int max_block = (output_grad.shape_.Size() / o_c + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 num_blocks(kMaxGridDim, (max_block + kMaxGridDim - 1) / kMaxGridDim); dim3 threads_per_block(kMaxThreadsPerBlock); CheckLaunchParam(num_blocks, threads_per_block, "spatial transformer backward"); cudaStream_t stream = Stream<gpu>::GetStream(input_grad.stream_); BilinearSamplingBackwardKernel<DType> << <num_blocks, threads_per_block, 0, stream >> >( i_c, i_h, i_w, grad, data, o_n, o_c, o_h, o_w, g_input, grid_src); MSHADOW_CUDA_POST_KERNEL_CHECK(BilinearSamplingBackwardKernel); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(SpatialTransformerParam param, int dtype) { Operator *op = NULL; #if MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 5 MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { if (param.cudnn_off.has_value() && param.cudnn_off.value()) { op = new SpatialTransformerOp<gpu, DType>(param); } else { op = new CuDNNSpatialTransformerOp<DType>(param); } }) #else MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new SpatialTransformerOp<gpu, DType>(param); }) #endif // MXNET_USE_CUDNN && CUDNN_MAJOR return op; } } // namespace op } // namespace mxnet
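For each output element, BilinearSamplingForwardKernel above maps the grid coordinate from [-1, 1] to pixel space via (g + 1) * (size - 1) / 2 and blends the four neighbouring input pixels. The host-side reference below spells out that blend for a single channel; names are assumptions for illustration, and out-of-range corners contribute zero, exactly as in the kernel.

#include <cmath>

// Reference bilinear blend for a single-channel h x w image at (y, x) in pixel space.
float bilinear_ref(const float* img, int h, int w, float y, float x)
{
    const int y0 = static_cast<int>(std::floor(y));
    const int x0 = static_cast<int>(std::floor(x));
    const float wy = 1.0f - (y - y0);   // weight of the top row
    const float wx = 1.0f - (x - x0);   // weight of the left column
    auto at = [&](int yy, int xx) {
        return (yy >= 0 && yy < h && xx >= 0 && xx < w) ? img[yy * w + xx] : 0.0f;
    };
    return at(y0,     x0    ) * wy          * wx
         + at(y0,     x0 + 1) * wy          * (1.0f - wx)
         + at(y0 + 1, x0    ) * (1.0f - wy) * wx
         + at(y0 + 1, x0 + 1) * (1.0f - wy) * (1.0f - wx);
}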
65b7682130e2d55a0a15e2bf35e67ffaf6d411e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. // This file is modified from https://github.com/pytorch/pytorch/blob/master/modules/detectron/sigmoid_focal_loss_op.cu // Cheng-Yang Fu // [email protected] #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> #include <cfloat> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __global__ void SigmoidFocalLossForward(const int nthreads, const T* logits, const int* targets, const int num_classes, const float gamma, const float alpha, const int num, T* losses) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [1~80]; // Decide it is positive or negative case. T c1 = (t == (d+1)); T c2 = (t>=0 & t != (d+1)); T zn = (1.0 - alpha); T zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) T p = 1. / (1. + expf(-logits[i])); // (1-p)**gamma * log(p) where T term1 = powf((1. - p), gamma) * logf(max(p, FLT_MIN)); // p**gamma * log(1-p) T term2 = powf(p, gamma) * (-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))); losses[i] = 0.0; losses[i] += -c1 * term1 * zp; losses[i] += -c2 * term2 * zn; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossForward template <typename T> __global__ void SigmoidFocalLossBackward(const int nthreads, const T* logits, const int* targets, const T* d_losses, const int num_classes, const float gamma, const float alpha, const int num, T* d_logits) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [1~80], 0 is background; // Decide it is positive or negative case. T c1 = (t == (d+1)); T c2 = (t>=0 & t != (d+1)); T zn = (1.0 - alpha); T zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) T p = 1. / (1. + expf(-logits[i])); // (1-p)**g * (1 - p - g*p*log(p) T term1 = powf((1. - p), gamma) * (1. - p - (p * gamma * logf(max(p, FLT_MIN)))); // (p**g) * (g*(1-p)*log(1-p) - p) T term2 = powf(p, gamma) * ((-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))) * (1. 
- p) * gamma - p); d_logits[i] = 0.0; d_logits[i] += -c1 * term1 * zp; d_logits[i] += -c2 * term2 * zn; d_logits[i] = d_logits[i] * d_losses[i]; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossBackward namespace cvpods { at::Tensor SigmoidFocalLoss_forward_cuda( const at::Tensor& logits, const at::Tensor& targets, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.device().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.device().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); auto losses = at::empty({num_samples, logits.size(1)}, logits.options()); auto losses_size = num_samples * logits.size(1); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv((long)losses_size, 512L), 4096L)); dim3 block(512); if (losses.numel() == 0) { THCudaCheck(hipGetLastError()); return losses; } AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SigmoidFocalLoss_forward", [&] { hipLaunchKernelGGL(( SigmoidFocalLossForward<scalar_t>), dim3(grid), dim3(block), 0, stream, losses_size, logits.contiguous().data_ptr<scalar_t>(), targets.contiguous().data_ptr<int>(), num_classes, gamma, alpha, num_samples, losses.data_ptr<scalar_t>()); }); THCudaCheck(hipGetLastError()); return losses; } at::Tensor SigmoidFocalLoss_backward_cuda( const at::Tensor& logits, const at::Tensor& targets, const at::Tensor& d_losses, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.device().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.device().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(d_losses.device().is_cuda(), "d_losses must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); AT_ASSERTM(logits.size(1) == num_classes, "logits.size(1) should be num_classes"); auto d_logits = at::zeros({num_samples, num_classes}, logits.options()); auto d_logits_size = num_samples * logits.size(1); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv((long)d_logits_size, 512L), 4096L)); dim3 block(512); if (d_logits.numel() == 0) { THCudaCheck(hipGetLastError()); return d_logits; } AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SigmoidFocalLoss_backward", [&] { hipLaunchKernelGGL(( SigmoidFocalLossBackward<scalar_t>), dim3(grid), dim3(block), 0, stream, d_logits_size, logits.contiguous().data_ptr<scalar_t>(), targets.contiguous().data_ptr<int>(), d_losses.contiguous().data_ptr<scalar_t>(), num_classes, gamma, alpha, num_samples, d_logits.data_ptr<scalar_t>()); }); THCudaCheck(hipGetLastError()); return d_logits; } }
65b7682130e2d55a0a15e2bf35e67ffaf6d411e2.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. // This file is modified from https://github.com/pytorch/pytorch/blob/master/modules/detectron/sigmoid_focal_loss_op.cu // Cheng-Yang Fu // [email protected] #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #include <cfloat> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __global__ void SigmoidFocalLossForward(const int nthreads, const T* logits, const int* targets, const int num_classes, const float gamma, const float alpha, const int num, T* losses) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [1~80]; // Decide it is positive or negative case. T c1 = (t == (d+1)); T c2 = (t>=0 & t != (d+1)); T zn = (1.0 - alpha); T zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) T p = 1. / (1. + expf(-logits[i])); // (1-p)**gamma * log(p) where T term1 = powf((1. - p), gamma) * logf(max(p, FLT_MIN)); // p**gamma * log(1-p) T term2 = powf(p, gamma) * (-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))); losses[i] = 0.0; losses[i] += -c1 * term1 * zp; losses[i] += -c2 * term2 * zn; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossForward template <typename T> __global__ void SigmoidFocalLossBackward(const int nthreads, const T* logits, const int* targets, const T* d_losses, const int num_classes, const float gamma, const float alpha, const int num, T* d_logits) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [1~80], 0 is background; // Decide it is positive or negative case. T c1 = (t == (d+1)); T c2 = (t>=0 & t != (d+1)); T zn = (1.0 - alpha); T zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) T p = 1. / (1. + expf(-logits[i])); // (1-p)**g * (1 - p - g*p*log(p) T term1 = powf((1. - p), gamma) * (1. - p - (p * gamma * logf(max(p, FLT_MIN)))); // (p**g) * (g*(1-p)*log(1-p) - p) T term2 = powf(p, gamma) * ((-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))) * (1. 
- p) * gamma - p); d_logits[i] = 0.0; d_logits[i] += -c1 * term1 * zp; d_logits[i] += -c2 * term2 * zn; d_logits[i] = d_logits[i] * d_losses[i]; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossBackward namespace cvpods { at::Tensor SigmoidFocalLoss_forward_cuda( const at::Tensor& logits, const at::Tensor& targets, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.device().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.device().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); auto losses = at::empty({num_samples, logits.size(1)}, logits.options()); auto losses_size = num_samples * logits.size(1); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv((long)losses_size, 512L), 4096L)); dim3 block(512); if (losses.numel() == 0) { THCudaCheck(cudaGetLastError()); return losses; } AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SigmoidFocalLoss_forward", [&] { SigmoidFocalLossForward<scalar_t><<<grid, block, 0, stream>>>( losses_size, logits.contiguous().data_ptr<scalar_t>(), targets.contiguous().data_ptr<int>(), num_classes, gamma, alpha, num_samples, losses.data_ptr<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return losses; } at::Tensor SigmoidFocalLoss_backward_cuda( const at::Tensor& logits, const at::Tensor& targets, const at::Tensor& d_losses, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.device().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.device().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(d_losses.device().is_cuda(), "d_losses must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); AT_ASSERTM(logits.size(1) == num_classes, "logits.size(1) should be num_classes"); auto d_logits = at::zeros({num_samples, num_classes}, logits.options()); auto d_logits_size = num_samples * logits.size(1); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv((long)d_logits_size, 512L), 4096L)); dim3 block(512); if (d_logits.numel() == 0) { THCudaCheck(cudaGetLastError()); return d_logits; } AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SigmoidFocalLoss_backward", [&] { SigmoidFocalLossBackward<scalar_t><<<grid, block, 0, stream>>>( d_logits_size, logits.contiguous().data_ptr<scalar_t>(), targets.contiguous().data_ptr<int>(), d_losses.contiguous().data_ptr<scalar_t>(), num_classes, gamma, alpha, num_samples, d_logits.data_ptr<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return d_logits; } }
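Both focal-loss kernels above evaluate log(1 - sigmoid(x)) through the expression -x*(x>=0) - logf(1 + expf(x - 2*x*(x>=0))), which avoids overflow for large |x|. The small helper below restates that identity on its own; the function name is an assumption and is not part of the extension above.

// Numerically stable log(1 - sigmoid(x)):
//   x >= 0 :  -x - log(1 + exp(-x))
//   x <  0 :       -log(1 + exp(x))
__host__ __device__ inline float log_one_minus_sigmoid(float x)
{
    const float t = (x >= 0.0f) ? x : 0.0f;
    return -t - logf(1.0f + expf(x - 2.0f * t));
}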
ceaa6328b0c600a6846555adcd938912927944cc.hip
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------

/**
 * @file
 * test_utils.cu
 *
 * @brief Utility Routines for Tests
 */

#include <gunrock/util/test_utils.h>

namespace gunrock {
namespace util {

/******************************************************************************
 * Device initialization
 ******************************************************************************/

void DeviceInit(CommandLineArgs &args)
{
    int deviceCount;
    hipGetDeviceCount(&deviceCount);
    if (deviceCount == 0) {
        fprintf(stderr, "No devices supporting CUDA.\n");
        exit(1);
    }
    int dev = 0;
    args.GetCmdLineArgument("device", dev);
    if (dev < 0) {
        dev = 0;
    }
    if (dev > deviceCount - 1) {
        dev = deviceCount - 1;
    }
    hipDeviceProp_t deviceProp;
    hipGetDeviceProperties(&deviceProp, dev);
    if (deviceProp.major < 1) {
        fprintf(stderr, "Device does not support CUDA.\n");
        exit(1);
    }
    if (!args.CheckCmdLineFlag("quiet")) {
        printf("Using device %d: %s\n", dev, deviceProp.name);
    }
    hipSetDevice(dev);
}

} //util
} //gunrock
ceaa6328b0c600a6846555adcd938912927944cc.cu
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------

/**
 * @file
 * test_utils.cu
 *
 * @brief Utility Routines for Tests
 */

#include <gunrock/util/test_utils.h>

namespace gunrock {
namespace util {

/******************************************************************************
 * Device initialization
 ******************************************************************************/

void DeviceInit(CommandLineArgs &args)
{
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0) {
        fprintf(stderr, "No devices supporting CUDA.\n");
        exit(1);
    }
    int dev = 0;
    args.GetCmdLineArgument("device", dev);
    if (dev < 0) {
        dev = 0;
    }
    if (dev > deviceCount - 1) {
        dev = deviceCount - 1;
    }
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    if (deviceProp.major < 1) {
        fprintf(stderr, "Device does not support CUDA.\n");
        exit(1);
    }
    if (!args.CheckCmdLineFlag("quiet")) {
        printf("Using device %d: %s\n", dev, deviceProp.name);
    }
    cudaSetDevice(dev);
}

} //util
} //gunrock
sumArrayOnGPU-time.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <sys/time.h> void initialData(float* ip, int size){ time_t t; srand((unsigned int)time(&t)); for(int i = 0; i<size; i++){ ip[i] = (float)(rand() & 0xFF)/10.0f; } } void checkResult(float *hostRef, float *gpuRef, const int N){ double epsilon = 1.0E-8; bool match = 1; for(int i = 0; i<N; i++){ if(abs(hostRef[i] - gpuRef[i]) > epsilon){ match = 0; printf("Arrays do not match!\n"); printf("Host %5.2f GPU %5.2f at current %d", hostRef[i], gpuRef[i], i); break; } } if(match) printf("Arrays match. \n \n"); } double cpuSecond(){ struct timeval tp; gettimeofday(&tp, NULL); return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6); } void sumArraysOnHost(float* A, float* B, float* C, const int N){ for(int idx = 0; idx<N; idx++){ C[idx] = A[idx] + B[idx]; } } __global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N){ int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < N) C[i] = A[i] + B[i]; } int main(int argc, char **argv){ printf("%s Starting... \n", argv[0]); // set up device int dev = 0; hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); printf("Using Device %d:%s\n", dev, deviceProp.name); hipSetDevice(dev); // set up date size of vectors int nElem = 1<<24; printf("Vector size %d\n", nElem); // malloc host memory size_t nBytes = nElem * sizeof(float); float *h_A, *h_B, *hostRef, *gpuRef; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); hostRef = (float *)malloc(nBytes); gpuRef = (float *)malloc(nBytes); double iStart, iElaps; // initialize data at host side iStart = cpuSecond(); initialData(h_A, nElem); initialData(h_B, nElem); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // add vector at host side for result checks iStart = cpuSecond(); sumArraysOnHost(h_A, h_B, hostRef, nElem); iElaps = cpuSecond() - iStart; // malloc device global memory float *d_A, *d_B, *d_C; hipMalloc((float **)&d_A, nBytes); hipMalloc((float **)&d_B, nBytes); hipMalloc((float **)&d_C, nBytes); // transfer data from host to device hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice); // invoke kernel at host side int iLen = 1024; dim3 block(iLen); dim3 grid((nElem+block.x-1)/block.x); iStart = cpuSecond(); hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid), dim3(block) , 0, 0, d_A, d_B, d_C, nElem); hipDeviceSynchronize(); iElaps = cpuSecond() - iStart; printf("sumArraysOnGPU <<< %d, %d >>> Time elapsed %f sec\n", grid.x, block.x, iElaps); // copy kernel result back to host side hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost); // check device results checkResult(hostRef, gpuRef, nElem); // free device global memory hipFree(d_A); hipFree(d_B); hipFree(d_C); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); return 0; }
sumArrayOnGPU-time.cu
#include <cuda_runtime.h> #include <stdio.h> #include <sys/time.h> void initialData(float* ip, int size){ time_t t; srand((unsigned int)time(&t)); for(int i = 0; i<size; i++){ ip[i] = (float)(rand() & 0xFF)/10.0f; } } void checkResult(float *hostRef, float *gpuRef, const int N){ double epsilon = 1.0E-8; bool match = 1; for(int i = 0; i<N; i++){ if(abs(hostRef[i] - gpuRef[i]) > epsilon){ match = 0; printf("Arrays do not match!\n"); printf("Host %5.2f GPU %5.2f at current %d", hostRef[i], gpuRef[i], i); break; } } if(match) printf("Arrays match. \n \n"); } double cpuSecond(){ struct timeval tp; gettimeofday(&tp, NULL); return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6); } void sumArraysOnHost(float* A, float* B, float* C, const int N){ for(int idx = 0; idx<N; idx++){ C[idx] = A[idx] + B[idx]; } } __global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N){ int i = blockIdx.x * blockDim.x + threadIdx.x; if(i < N) C[i] = A[i] + B[i]; } int main(int argc, char **argv){ printf("%s Starting... \n", argv[0]); // set up device int dev = 0; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); printf("Using Device %d:%s\n", dev, deviceProp.name); cudaSetDevice(dev); // set up date size of vectors int nElem = 1<<24; printf("Vector size %d\n", nElem); // malloc host memory size_t nBytes = nElem * sizeof(float); float *h_A, *h_B, *hostRef, *gpuRef; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); hostRef = (float *)malloc(nBytes); gpuRef = (float *)malloc(nBytes); double iStart, iElaps; // initialize data at host side iStart = cpuSecond(); initialData(h_A, nElem); initialData(h_B, nElem); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // add vector at host side for result checks iStart = cpuSecond(); sumArraysOnHost(h_A, h_B, hostRef, nElem); iElaps = cpuSecond() - iStart; // malloc device global memory float *d_A, *d_B, *d_C; cudaMalloc((float **)&d_A, nBytes); cudaMalloc((float **)&d_B, nBytes); cudaMalloc((float **)&d_C, nBytes); // transfer data from host to device cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice); // invoke kernel at host side int iLen = 1024; dim3 block(iLen); dim3 grid((nElem+block.x-1)/block.x); iStart = cpuSecond(); sumArraysOnGPU<<< grid, block >>>(d_A, d_B, d_C, nElem); cudaDeviceSynchronize(); iElaps = cpuSecond() - iStart; printf("sumArraysOnGPU <<< %d, %d >>> Time elapsed %f sec\n", grid.x, block.x, iElaps); // copy kernel result back to host side cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost); // check device results checkResult(hostRef, gpuRef, nElem); // free device global memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); return 0; }
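The program above times the kernel with a CPU wall-clock timer plus cudaDeviceSynchronize(). An alternative is event-based timing on the GPU timeline; the sketch below assumes d_A, d_B, d_C, grid, block and nElem are already set up exactly as in the main() above.

// Event-based timing of the same launch (millisecond resolution).
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start);
sumArraysOnGPU<<<grid, block>>>(d_A, d_B, d_C, nElem);
cudaEventRecord(stop);
cudaEventSynchronize(stop);           // block until the kernel has finished

float ms = 0.0f;
cudaEventElapsedTime(&ms, start, stop);
printf("sumArraysOnGPU <<<%d, %d>>> took %.3f ms\n", grid.x, block.x, ms);

cudaEventDestroy(start);
cudaEventDestroy(stop);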
9f3d24768d0181dd2d1095f22f001f2f9a35b8c8.hip
// !!! This is a file automatically generated by hipify!!! #include <cstring> #include <stdexcept> #include <cstdlib> #include <fstream> #include <iostream> #include <vector> #include "utils.h" #include "cuda_error_check.cuh" #include "initial_graph.hpp" #include "parse_graph.hpp" #include "opt.cu" #include "impl2.cu" #include "impl1.cu" #define SSSP_INF 1073741824 enum class ProcessingType {Push, Neighbor, Own, Unknown}; enum SyncMode {InCore, OutOfCore}; enum SyncMode syncMethod; enum SmemMode {UseSmem, UseNoSmem}; enum SmemMode smemMethod; // Open files safely. template <typename T_file> void openFileToAccess( T_file& input_file, std::string file_name ) { input_file.open( file_name.c_str() ); if( !input_file ) throw std::runtime_error( "Failed to open specified file: " + file_name + "\n" ); } void myTestCorrectness(std::vector<initial_vertex> * parsedGraph, const char* outputFileName) { std::cout << std::endl << "TESTING CORRECTNESS" << std::endl; std::cout << "RUNNING SEQUENTIAL BMF..." << std::endl; int vertex_size = (*parsedGraph).size(); int *d= new int[vertex_size]; for (int i = 0; i < vertex_size; ++i) d[i] = SSSP_INF; d[0]=0; int change = 0; for (int k = 1; k < vertex_size; k++){ for (int i = 0; i < vertex_size; i++){ std::vector<neighbor> nbrs = (*parsedGraph)[i].nbrs; for (int j = 0; j < nbrs.size(); ++j){ int u = nbrs[j].srcIndex; int v = i; int w = nbrs[j].edgeValue.weight; if ((d[u] + w) < d[v]){ d[v] = d[u]+w; change = 1; } } } if (change == 0) break; change = 0; } //Compare the distance array and the parallel output file std::ifstream outputFile; openFileToAccess< std::ifstream >( outputFile, std::string( outputFileName ) ); std::string line; int i = 0; int incorrect = 0; while (getline(outputFile,line)) { std::string curr = (d[i] < SSSP_INF) ? (std::to_string(i) + ":" + std::to_string(d[i])):(std::to_string(i) +":" + "INF"); // std::cout << std::to_string(line.compare(curr)) << std::endl; if(line.compare(curr) != 0) { incorrect++; std::cout << "Correct: " << curr << "\tYours: " << line << std::endl; } i++; } if(i != vertex_size) { std::cout << "Insufficient vertices found in outputfile" << std::endl; std::cout << "Expected: " << vertex_size << "Found: " << i << std::endl; return; } std::cout << "Correct: " << std::to_string(vertex_size-incorrect) << "\t Incorrect: " << std::to_string(incorrect) << " \t Total: " << std::to_string(vertex_size) << std::endl; outputFile.close(); } void testCorrectness(edge_node *edges, const char* outputFileName, uint nVertices, uint nEdges) { std::cout << std::endl << "TESTING CORRECTNESS" << std::endl; std::cout << "RUNNING SEQUENTIAL BMF..." << std::endl; unsigned int *d= new unsigned int[nVertices]; d[0]=0; for (int i = 1; i < nVertices; ++i){ d[i] = UINT_MAX; } int change = 0; for(int i = 1; i < nVertices; i++){ for(int j = 0; j < nEdges; j++){ int u = edges[j].srcIndex; int v = edges[j].destIndex; int w = edges[j].weight; if(d[u] == UINT_MAX){ continue; } else if(d[u]+w < d[v]){ d[v] = d[u]+w; change = 1; } } if(!change){ break; } change = 0; } //Compare the distance array and the parallel output file std::ifstream outputFile; openFileToAccess< std::ifstream >( outputFile, std::string( outputFileName ) ); std::string line; int i = 0; int incorrect = 0; while (getline(outputFile,line)) { std::string curr = (d[i] < UINT_MAX) ? 
(std::to_string(i) + ":" + std::to_string(d[i])):(std::to_string(i) +":" + "INF"); if(line.compare(curr) != 0) { incorrect++; // std::cout << "Correct: " << curr << "\tYours: " << line << std::endl; } i++; } if(i != nVertices) { std::cout << "Insufficient vertices found in outputfile" << std::endl; std::cout << "Expected: " << nVertices << "Found: " << i << std::endl; return; } std::cout << "Correct: " << std::to_string(nVertices-incorrect) << "\t Incorrect: " << std::to_string(incorrect) << " \t Total: " << std::to_string(nVertices) << std::endl; outputFile.close(); } // Execution entry point. int main( int argc, char** argv ) { std::string usage = "\tRequired command line arguments:\n\ Input file: E.g., --input in.txt\n\ Block size: E.g., --bsize 512\n\ Block count: E.g., --bcount 192\n\ Output path: E.g., --output output.txt\n\ Processing method: E.g., --method bmf (bellman-ford), or tpe (to-process-edge), or opt (one further optimizations)\n\ Shared memory usage: E.g., --usesmem yes, or no \n\ Sync method: E.g., --sync incore, or outcore\n\ Sort method: E.g., --sortby src, or dest\n"; try { std::ifstream inputFile; std::ofstream outputFile; std::string outputFileName; int selectedDevice = 0; int bsize = 0, bcount = 0; int vwsize = 32; int threads = 1; long long arbparam = 0; bool nonDirectedGraph = false; // By default, the graph is directed. ProcessingType processingMethod = ProcessingType::Unknown; syncMethod = OutOfCore; smemMethod = UseNoSmem; bool sortBySource = false; /******************************** * GETTING INPUT PARAMETERS. ********************************/ for( int iii = 1; iii < argc; ++iii ) if ( !strcmp(argv[iii], "--method") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "bmf") ) processingMethod = ProcessingType::Push; else if ( !strcmp(argv[iii+1], "tpe") ) processingMethod = ProcessingType::Neighbor; else if ( !strcmp(argv[iii+1], "opt") ) processingMethod = ProcessingType::Own; else{ std::cerr << "\n Un-recognized method parameter value \n\n"; exit(EXIT_FAILURE); } } else if ( !strcmp(argv[iii], "--sync") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "incore") ){ syncMethod = InCore; } else if ( !strcmp(argv[iii+1], "outcore") ){ syncMethod = OutOfCore; } else { std::cerr << "\n Un-recognized sync parameter value \n\n"; exit(EXIT_FAILURE); } } else if ( !strcmp(argv[iii], "--usesmem") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "yes") ){ smemMethod = UseSmem; } else if ( !strcmp(argv[iii+1], "no") ){ smemMethod = UseNoSmem; } else{ std::cerr << "\n Un-recognized usesmem parameter value \n\n"; exit(EXIT_FAILURE); } } else if( !strcmp( argv[iii], "--input" ) && iii != argc-1 /*is not the last one*/){ openFileToAccess< std::ifstream >( inputFile, std::string( argv[iii+1] ) ); } else if( !strcmp( argv[iii], "--output" ) && iii != argc-1 /*is not the last one*/){ openFileToAccess< std::ofstream >( outputFile, std::string( argv[iii+1] ) ); outputFileName = std::string(argv[iii+1]); } else if( !strcmp( argv[iii], "--bsize" ) && iii != argc-1 /*is not the last one*/){ bsize = std::atoi( argv[iii+1] ); } else if( !strcmp( argv[iii], "--bcount" ) && iii != argc-1 /*is not the last one*/){ bcount = std::atoi( argv[iii+1] ); } else if( !strcmp( argv[iii], "--sortby" ) && iii != argc-1 /*is not the last one*/){ if( !strcmp(argv[iii+1], "src" )){ sortBySource = true; } else if( !strcmp(argv[iii+1], "dest")){ sortBySource = false; } else { std::cerr << "\n Unrecognized sortby parameter value\n\n"; exit(EXIT_FAILURE); } } if(bsize <= 0 || bcount <= 0){ std::cerr << "Usage: " << 
usage; exit(EXIT_FAILURE); throw std::runtime_error("\nAn initialization error happened.\nExiting."); } if( !inputFile.is_open() || processingMethod == ProcessingType::Unknown ) { std::cerr << "Usage: " << usage; throw std::runtime_error( "\nAn initialization error happened.\nExiting." ); } if( !outputFile.is_open() ){ openFileToAccess< std::ofstream >( outputFile, "out.txt" ); outputFileName = "out.txt"; } CUDAErrorCheck( hipSetDevice( selectedDevice ) ); std::cout << "Device with ID " << selectedDevice << " is selected to process the graph.\n"; /******************************** * Read the input graph file. ********************************/ std::cout << "Collecting the input graph ...\n"; std::vector<initial_vertex> parsedGraph( 0 ); uint nEdges = parse_graph::parse( inputFile, // Input file. parsedGraph, // The parsed graph. arbparam, nonDirectedGraph ); // Arbitrary user-provided parameter. std::cout << "Input graph collected with " << parsedGraph.size() << " vertices and " << nEdges << " edges.\n"; /******************************** * Process the graph. ********************************/ switch(processingMethod){ case ProcessingType::Push: if(smemMethod == UseSmem){ cout << "USE SMEM" << endl; puller_usesmem(&parsedGraph, bsize, bcount, outputFile); } else if(syncMethod == OutOfCore){ puller(&parsedGraph, bsize, bcount, outputFile, sortBySource); } else if(syncMethod == InCore){ puller_incore(&parsedGraph, bsize, bcount, outputFile, sortBySource); } else { cout << "syncMethod not specified" << endl; exit(EXIT_FAILURE); } break; case ProcessingType::Neighbor: if(syncMethod == OutOfCore){ impl2_outcore(&parsedGraph, bsize, bcount, outputFile, sortBySource); } else if(syncMethod == InCore){ impl2_incore(&parsedGraph, bsize, bcount, outputFile, sortBySource); } else { cout << "syncMethod not specified" << endl; exit(EXIT_FAILURE); } break; default: own(&parsedGraph, bsize, bcount); } /******************************** * It's done here. ********************************/ edge_node *testEdgeList = new edge_node[nEdges]; pull_edges(parsedGraph, testEdgeList, nEdges); testCorrectness(testEdgeList, outputFileName.c_str(), parsedGraph.size(), nEdges); // myTestCorrectness(&parsedGraph, outputFileName.c_str()); CUDAErrorCheck( hipDeviceReset() ); std::cout << "Done.\n"; return( EXIT_SUCCESS ); } catch( const std::exception& strException ) { std::cerr << strException.what() << "\n"; return( EXIT_FAILURE ); } catch(...) { std::cerr << "An exception has occurred." << std::endl; return( EXIT_FAILURE ); } }
9f3d24768d0181dd2d1095f22f001f2f9a35b8c8.cu
#include <cstring> #include <stdexcept> #include <cstdlib> #include <fstream> #include <iostream> #include <vector> #include "utils.h" #include "cuda_error_check.cuh" #include "initial_graph.hpp" #include "parse_graph.hpp" #include "opt.cu" #include "impl2.cu" #include "impl1.cu" #define SSSP_INF 1073741824 enum class ProcessingType {Push, Neighbor, Own, Unknown}; enum SyncMode {InCore, OutOfCore}; enum SyncMode syncMethod; enum SmemMode {UseSmem, UseNoSmem}; enum SmemMode smemMethod; // Open files safely. template <typename T_file> void openFileToAccess( T_file& input_file, std::string file_name ) { input_file.open( file_name.c_str() ); if( !input_file ) throw std::runtime_error( "Failed to open specified file: " + file_name + "\n" ); } void myTestCorrectness(std::vector<initial_vertex> * parsedGraph, const char* outputFileName) { std::cout << std::endl << "TESTING CORRECTNESS" << std::endl; std::cout << "RUNNING SEQUENTIAL BMF..." << std::endl; int vertex_size = (*parsedGraph).size(); int *d= new int[vertex_size]; for (int i = 0; i < vertex_size; ++i) d[i] = SSSP_INF; d[0]=0; int change = 0; for (int k = 1; k < vertex_size; k++){ for (int i = 0; i < vertex_size; i++){ std::vector<neighbor> nbrs = (*parsedGraph)[i].nbrs; for (int j = 0; j < nbrs.size(); ++j){ int u = nbrs[j].srcIndex; int v = i; int w = nbrs[j].edgeValue.weight; if ((d[u] + w) < d[v]){ d[v] = d[u]+w; change = 1; } } } if (change == 0) break; change = 0; } //Compare the distance array and the parallel output file std::ifstream outputFile; openFileToAccess< std::ifstream >( outputFile, std::string( outputFileName ) ); std::string line; int i = 0; int incorrect = 0; while (getline(outputFile,line)) { std::string curr = (d[i] < SSSP_INF) ? (std::to_string(i) + ":" + std::to_string(d[i])):(std::to_string(i) +":" + "INF"); // std::cout << std::to_string(line.compare(curr)) << std::endl; if(line.compare(curr) != 0) { incorrect++; std::cout << "Correct: " << curr << "\tYours: " << line << std::endl; } i++; } if(i != vertex_size) { std::cout << "Insufficient vertices found in outputfile" << std::endl; std::cout << "Expected: " << vertex_size << "Found: " << i << std::endl; return; } std::cout << "Correct: " << std::to_string(vertex_size-incorrect) << "\t Incorrect: " << std::to_string(incorrect) << " \t Total: " << std::to_string(vertex_size) << std::endl; outputFile.close(); } void testCorrectness(edge_node *edges, const char* outputFileName, uint nVertices, uint nEdges) { std::cout << std::endl << "TESTING CORRECTNESS" << std::endl; std::cout << "RUNNING SEQUENTIAL BMF..." << std::endl; unsigned int *d= new unsigned int[nVertices]; d[0]=0; for (int i = 1; i < nVertices; ++i){ d[i] = UINT_MAX; } int change = 0; for(int i = 1; i < nVertices; i++){ for(int j = 0; j < nEdges; j++){ int u = edges[j].srcIndex; int v = edges[j].destIndex; int w = edges[j].weight; if(d[u] == UINT_MAX){ continue; } else if(d[u]+w < d[v]){ d[v] = d[u]+w; change = 1; } } if(!change){ break; } change = 0; } //Compare the distance array and the parallel output file std::ifstream outputFile; openFileToAccess< std::ifstream >( outputFile, std::string( outputFileName ) ); std::string line; int i = 0; int incorrect = 0; while (getline(outputFile,line)) { std::string curr = (d[i] < UINT_MAX) ? 
(std::to_string(i) + ":" + std::to_string(d[i])):(std::to_string(i) +":" + "INF"); if(line.compare(curr) != 0) { incorrect++; // std::cout << "Correct: " << curr << "\tYours: " << line << std::endl; } i++; } if(i != nVertices) { std::cout << "Insufficient vertices found in outputfile" << std::endl; std::cout << "Expected: " << nVertices << "Found: " << i << std::endl; return; } std::cout << "Correct: " << std::to_string(nVertices-incorrect) << "\t Incorrect: " << std::to_string(incorrect) << " \t Total: " << std::to_string(nVertices) << std::endl; outputFile.close(); } // Execution entry point. int main( int argc, char** argv ) { std::string usage = "\tRequired command line arguments:\n\ Input file: E.g., --input in.txt\n\ Block size: E.g., --bsize 512\n\ Block count: E.g., --bcount 192\n\ Output path: E.g., --output output.txt\n\ Processing method: E.g., --method bmf (bellman-ford), or tpe (to-process-edge), or opt (one further optimizations)\n\ Shared memory usage: E.g., --usesmem yes, or no \n\ Sync method: E.g., --sync incore, or outcore\n\ Sort method: E.g., --sortby src, or dest\n"; try { std::ifstream inputFile; std::ofstream outputFile; std::string outputFileName; int selectedDevice = 0; int bsize = 0, bcount = 0; int vwsize = 32; int threads = 1; long long arbparam = 0; bool nonDirectedGraph = false; // By default, the graph is directed. ProcessingType processingMethod = ProcessingType::Unknown; syncMethod = OutOfCore; smemMethod = UseNoSmem; bool sortBySource = false; /******************************** * GETTING INPUT PARAMETERS. ********************************/ for( int iii = 1; iii < argc; ++iii ) if ( !strcmp(argv[iii], "--method") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "bmf") ) processingMethod = ProcessingType::Push; else if ( !strcmp(argv[iii+1], "tpe") ) processingMethod = ProcessingType::Neighbor; else if ( !strcmp(argv[iii+1], "opt") ) processingMethod = ProcessingType::Own; else{ std::cerr << "\n Un-recognized method parameter value \n\n"; exit(EXIT_FAILURE); } } else if ( !strcmp(argv[iii], "--sync") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "incore") ){ syncMethod = InCore; } else if ( !strcmp(argv[iii+1], "outcore") ){ syncMethod = OutOfCore; } else { std::cerr << "\n Un-recognized sync parameter value \n\n"; exit(EXIT_FAILURE); } } else if ( !strcmp(argv[iii], "--usesmem") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "yes") ){ smemMethod = UseSmem; } else if ( !strcmp(argv[iii+1], "no") ){ smemMethod = UseNoSmem; } else{ std::cerr << "\n Un-recognized usesmem parameter value \n\n"; exit(EXIT_FAILURE); } } else if( !strcmp( argv[iii], "--input" ) && iii != argc-1 /*is not the last one*/){ openFileToAccess< std::ifstream >( inputFile, std::string( argv[iii+1] ) ); } else if( !strcmp( argv[iii], "--output" ) && iii != argc-1 /*is not the last one*/){ openFileToAccess< std::ofstream >( outputFile, std::string( argv[iii+1] ) ); outputFileName = std::string(argv[iii+1]); } else if( !strcmp( argv[iii], "--bsize" ) && iii != argc-1 /*is not the last one*/){ bsize = std::atoi( argv[iii+1] ); } else if( !strcmp( argv[iii], "--bcount" ) && iii != argc-1 /*is not the last one*/){ bcount = std::atoi( argv[iii+1] ); } else if( !strcmp( argv[iii], "--sortby" ) && iii != argc-1 /*is not the last one*/){ if( !strcmp(argv[iii+1], "src" )){ sortBySource = true; } else if( !strcmp(argv[iii+1], "dest")){ sortBySource = false; } else { std::cerr << "\n Unrecognized sortby parameter value\n\n"; exit(EXIT_FAILURE); } } if(bsize <= 0 || bcount <= 0){ std::cerr << "Usage: " << 
usage; exit(EXIT_FAILURE); throw std::runtime_error("\nAn initialization error happened.\nExiting."); } if( !inputFile.is_open() || processingMethod == ProcessingType::Unknown ) { std::cerr << "Usage: " << usage; throw std::runtime_error( "\nAn initialization error happened.\nExiting." ); } if( !outputFile.is_open() ){ openFileToAccess< std::ofstream >( outputFile, "out.txt" ); outputFileName = "out.txt"; } CUDAErrorCheck( cudaSetDevice( selectedDevice ) ); std::cout << "Device with ID " << selectedDevice << " is selected to process the graph.\n"; /******************************** * Read the input graph file. ********************************/ std::cout << "Collecting the input graph ...\n"; std::vector<initial_vertex> parsedGraph( 0 ); uint nEdges = parse_graph::parse( inputFile, // Input file. parsedGraph, // The parsed graph. arbparam, nonDirectedGraph ); // Arbitrary user-provided parameter. std::cout << "Input graph collected with " << parsedGraph.size() << " vertices and " << nEdges << " edges.\n"; /******************************** * Process the graph. ********************************/ switch(processingMethod){ case ProcessingType::Push: if(smemMethod == UseSmem){ cout << "USE SMEM" << endl; puller_usesmem(&parsedGraph, bsize, bcount, outputFile); } else if(syncMethod == OutOfCore){ puller(&parsedGraph, bsize, bcount, outputFile, sortBySource); } else if(syncMethod == InCore){ puller_incore(&parsedGraph, bsize, bcount, outputFile, sortBySource); } else { cout << "syncMethod not specified" << endl; exit(EXIT_FAILURE); } break; case ProcessingType::Neighbor: if(syncMethod == OutOfCore){ impl2_outcore(&parsedGraph, bsize, bcount, outputFile, sortBySource); } else if(syncMethod == InCore){ impl2_incore(&parsedGraph, bsize, bcount, outputFile, sortBySource); } else { cout << "syncMethod not specified" << endl; exit(EXIT_FAILURE); } break; default: own(&parsedGraph, bsize, bcount); } /******************************** * It's done here. ********************************/ edge_node *testEdgeList = new edge_node[nEdges]; pull_edges(parsedGraph, testEdgeList, nEdges); testCorrectness(testEdgeList, outputFileName.c_str(), parsedGraph.size(), nEdges); // myTestCorrectness(&parsedGraph, outputFileName.c_str()); CUDAErrorCheck( cudaDeviceReset() ); std::cout << "Done.\n"; return( EXIT_SUCCESS ); } catch( const std::exception& strException ) { std::cerr << strException.what() << "\n"; return( EXIT_FAILURE ); } catch(...) { std::cerr << "An exception has occurred." << std::endl; return( EXIT_FAILURE ); } }
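For readers skimming the flattened listing above, the correctness check in both versions of this pair reduces to repeated Bellman-Ford relaxation sweeps over the pulled edge list. The sketch below restates one sweep on the host, assuming the edge_node layout used by testCorrectness above (srcIndex, destIndex, weight) and the unsigned distance array with UINT_MAX as "unreached"; nothing beyond what the pair already does is added.

#include <climits>
// One relaxation sweep, as testCorrectness performs up to nVertices-1 times.
// Returns 1 if any distance improved, 0 otherwise.
static int relaxOnce(const edge_node *edges, unsigned int nEdges, unsigned int *d) {
    int change = 0;
    for (unsigned int j = 0; j < nEdges; ++j) {
        int u = edges[j].srcIndex;
        int v = edges[j].destIndex;
        int w = edges[j].weight;
        if (d[u] != UINT_MAX && d[u] + w < d[v]) { d[v] = d[u] + w; change = 1; }
    }
    return change;
}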
23f11ac1f80b3f68e379d35af81e074b915566ce.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

/*
 * Host function to initialize vector elements. This function
 * simply initializes each element to equal its index in the
 * vector.
 */
__global__ void initWith(float num, float *a, int N)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for( int i = idx; i < N; i += stride )
    a[i] = num;
}

/*
 * Device kernel stores into `result` the sum of each
 * same-indexed value of `a` and `b`.
 */
__global__ void addVectorsInto(float *result, float *a, float *b, int N)
{
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  int stride = blockDim.x * gridDim.x;
  for(int i = index; i < N; i += stride)
  {
    result[i] = a[i] + b[i];
  }
}

/*
 * Host function to confirm values in `vector`. This function
 * assumes all values are the same `target` value.
 */
void checkElementsAre(float target, float *vector, int N)
{
  for(int i = 0; i < N; i++)
  {
    if(vector[i] != target)
    {
      printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
      exit(1);
    }
  }
  printf("Success! All values calculated correctly.\n");
}

int main()
{
  const int N = 2<<24;
  size_t size = N * sizeof(float);

  float *a;
  float *b;
  float *c;

  hipMallocManaged(&a, size);
  hipMallocManaged(&b, size);
  hipMallocManaged(&c, size);

  size_t threadsPerBlock;
  size_t numberOfBlocks;

  threadsPerBlock = 256;
  numberOfBlocks = 120;

  hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks),dim3(threadsPerBlock), 0, 0, 3, a, N);
  hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks),dim3(threadsPerBlock), 0, 0, 4, b, N);
  hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks),dim3(threadsPerBlock), 0, 0, 0, c, N);

  /*
   * nsys should register performance changes when execution configuration
   * is updated.
   */
  threadsPerBlock = 256;
  numberOfBlocks = 120;

  hipError_t addVectorsErr;
  hipError_t asyncErr;

  hipLaunchKernelGGL(( addVectorsInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N);

  addVectorsErr = hipGetLastError();
  if(addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr));

  asyncErr = hipDeviceSynchronize();
  if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));

  checkElementsAre(7, c, N);

  hipFree(a);
  hipFree(b);
  hipFree(c);
}
23f11ac1f80b3f68e379d35af81e074b915566ce.cu
#include <stdio.h>

/*
 * Host function to initialize vector elements. This function
 * simply initializes each element to equal its index in the
 * vector.
 */
__global__ void initWith(float num, float *a, int N)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for( int i = idx; i < N; i += stride )
    a[i] = num;
}

/*
 * Device kernel stores into `result` the sum of each
 * same-indexed value of `a` and `b`.
 */
__global__ void addVectorsInto(float *result, float *a, float *b, int N)
{
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  int stride = blockDim.x * gridDim.x;
  for(int i = index; i < N; i += stride)
  {
    result[i] = a[i] + b[i];
  }
}

/*
 * Host function to confirm values in `vector`. This function
 * assumes all values are the same `target` value.
 */
void checkElementsAre(float target, float *vector, int N)
{
  for(int i = 0; i < N; i++)
  {
    if(vector[i] != target)
    {
      printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
      exit(1);
    }
  }
  printf("Success! All values calculated correctly.\n");
}

int main()
{
  const int N = 2<<24;
  size_t size = N * sizeof(float);

  float *a;
  float *b;
  float *c;

  cudaMallocManaged(&a, size);
  cudaMallocManaged(&b, size);
  cudaMallocManaged(&c, size);

  size_t threadsPerBlock;
  size_t numberOfBlocks;

  threadsPerBlock = 256;
  numberOfBlocks = 120;

  initWith<<<numberOfBlocks,threadsPerBlock>>>(3, a, N);
  initWith<<<numberOfBlocks,threadsPerBlock>>>(4, b, N);
  initWith<<<numberOfBlocks,threadsPerBlock>>>(0, c, N);

  /*
   * nsys should register performance changes when execution configuration
   * is updated.
   */
  threadsPerBlock = 256;
  numberOfBlocks = 120;

  cudaError_t addVectorsErr;
  cudaError_t asyncErr;

  addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);

  addVectorsErr = cudaGetLastError();
  if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr));

  asyncErr = cudaDeviceSynchronize();
  if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));

  checkElementsAre(7, c, N);

  cudaFree(a);
  cudaFree(b);
  cudaFree(c);
}
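Because the two small files above differ only in API spelling, they make a convenient reference for the hipify translation applied throughout this collection. The mapping below is read directly off this pair; the two literal zeros in each translated launch are hipLaunchKernelGGL's dynamic-shared-memory and stream arguments.

// CUDA construct (.cu file)                         ->  HIP construct produced by hipify (.hip file)
// (no explicit runtime include; nvcc adds it)       ->  #include "hip/hip_runtime.h"
// cudaMallocManaged(&a, size);                      ->  hipMallocManaged(&a, size);
// kernel<<<grid, block>>>(args...);                 ->  hipLaunchKernelGGL((kernel), dim3(grid), dim3(block),
//                                                           0 /*dynamic shared mem*/, 0 /*stream*/, args...);
// cudaError_t / cudaSuccess / cudaGetErrorString    ->  hipError_t / hipSuccess / hipGetErrorString
// cudaGetLastError() / cudaDeviceSynchronize()      ->  hipGetLastError() / hipDeviceSynchronize()
// cudaFree(a);                                      ->  hipFree(a);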
53e3288fa3d11292d9121154fc06fea8f0886069.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * serveral useful gpu functions will be defined in this file to facilitate * the surface redistance scheme ******************************************************************************/ typedef struct { double sR; double sL; } double_eno_derivative; __device__ inline double max2(double x, double y) { return (x<y) ? y : x; } __device__ inline double min2(double x, double y) { return (x<y) ? x : y; } __device__ inline double min_mod(double x, double y) { return (x*y<0) ? 0.0 : (fabs(x)<fabs(y) ? x : y); } __device__ inline double max_abs(double x, double y) { return (fabs(x)<fabs(y) ? y : x); } __device__ inline double sign(double x) { return (x>0) ? 1.0 : -1.0; } __device__ inline void upwind_derivative(double & Dr, double Cx, double Cy, double Cz, double xR, double xL, double yF, double yB, double zU, double zD) { Dr = 0; Dr += max2(Cx,0)*xR - min2(Cx,0)*xL; Dr += max2(Cy,0)*yF - min2(Cy,0)*yB; Dr += max2(Cz,0)*zU - min2(Cz,0)*zD; } // convert subindex to linear index // periodic boundary conditions are assumed __device__ inline int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges) { int row_idxn = min2(rows-1, max2(0, row_idx)); int col_idxn = min2(cols-1, max2(0, col_idx)); int pge_idxn = min2(pges-1, max2(0, pge_idx)); int ind = pge_idxn * rows * cols + col_idxn * rows + row_idxn; return ind; } /****************************************************************************** * calculate Eno derivatives at node v0: [v4,v1,v0,v2,v3] ******************************************************************************/ __device__ inline double_eno_derivative eno_derivative( double v4, double v1, double v0, double v2, double v3, double pr, double pl, double ds) { double p2m; double_eno_derivative eno_d; double p2 = v1 - 2.0 * v0 + v2; double p2r = v0 - 2.0 * v2 + v3; p2m = 0.5 * min_mod(p2, p2r) / pow(ds, 2); double vr = (pr==ds) ? v2 : 0; eno_d.sR = (vr - v0) / pr - pr * p2m; double p2l = v0 - 2.0 * v1 + v4; p2m = 0.5 * min_mod(p2, p2l) / pow(ds, 2); double vl = (pl==ds) ? 
v1 : 0; eno_d.sL = (v0 - vl) / pl + pl * p2m; return eno_d; } // calculate surface redistance step // now lsf represents the auxilary level set function(not the level set function) // inputs : the auxilary level set function, sign of the initial level set function, distance to the interface, normal vectors __global__ void surface_redistance_step(double * step, double const * lsf, double const * sign, double const * deltat, double const * nx, double const * ny, double const * nz, double const * ax, double const * ay, double const * az, double const * bx, double const * by, double const * bz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele) { int row_idx = blockIdx.x * blockDim.x + threadIdx.x; int col_idx = blockIdx.y * blockDim.y + threadIdx.y; int pge_idx = blockIdx.z * blockDim.z + threadIdx.z; if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){ return; } int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges); int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges); int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges); int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges); int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges); double_eno_derivative eno_dx = eno_derivative( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], xpr[ind], xpl[ind], dx); //double Dx[2] = {eno_dx.sR, eno_dx.sL}; double xR = eno_dx.sR; double xL = eno_dx.sL; int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges); int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges); int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges); int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges); double_eno_derivative eno_dy = eno_derivative( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], ypf[ind], ypb[ind], dy); //double Dy[2] = {eno_dy.sR, eno_dy.sL}; double yF = eno_dy.sR; double yB = eno_dy.sL; int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges); int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges); int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges); int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges); double_eno_derivative eno_dz = eno_derivative( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], zpu[ind], zpd[ind], dz); //double Dz[2] = {eno_dz.sR, eno_dz.sL}; double zU = eno_dz.sR; double zD = eno_dz.sL; double Nx = nx[ind]; double Ny = ny[ind]; double Nz = nz[ind]; double Ax = ax[ind]; double Ay = ay[ind]; double Az = az[ind]; double Bx = bx[ind]; double By = by[ind]; double Bz = bz[ind]; double Sign = sign[ind]; double dt = deltat[ind]; // forward and backward derivatives along a/b directions double Da_f, Da_b, Db_f, Db_b; upwind_derivative(Da_f,Ax,Ay,Az,xR,xL,yF,yB,zU,zD); upwind_derivative(Da_b,-Ax,-Ay,-Az,xR,xL,yF,yB,zU,zD); upwind_derivative(Db_f,Bx,By,Bz,xR,xL,yF,yB,zU,zD); upwind_derivative(Db_b,-Bx,-By,-Bz,xR,xL,yF,yB,zU,zD); // choice of one-sided derivatives in the surface coordiante double Da = max_abs(max2(Sign*Da_b,0),min2(Sign*Da_f,0)); double Db = max_abs(max2(Sign*Db_b,0),min2(Sign*Db_f,0)); double den = sqrt(Da*Da+Db*Db); // direction of information double H1 = (Da * Ax + Db * Bx) / den; double H2 = (Da * Ay + Db * By) / den; double H3 = (Da * Az + Db * Bz) / den; // select the right derivatives double dr_x = (H1>0) ? xL : ( (H1<0) ? xR : 0 ); double dr_y = (H2>0) ? 
yB : ( (H2<0) ? yF : 0 ); double dr_z = (H3>0) ? zD : ( (H3<0) ? zU : 0 ); double dr_n = Nx * dr_x + Ny * dr_y + Nz * dr_z; step[ind] = dt * Sign * (sqrt( dr_x*dr_x + dr_y*dr_y + dr_z*dr_z - dr_n*dr_n) - 1); } // construct a local coordinate system at each point __global__ void surface_coordinate(double * ax, double * ay, double * az, double * bx, double * by, double * bz, double const * nx, double const * ny, double const * nz, int rows, int cols, int pges) { int row_idx = blockIdx.x * blockDim.x + threadIdx.x; int col_idx = blockIdx.y * blockDim.y + threadIdx.y; int pge_idx = blockIdx.z * blockDim.z + threadIdx.z; if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){ return; } int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges); double Nx = nx[ind]; double Ny = ny[ind]; double Nz = nz[ind]; double epsilon = 1e-6; if(fabs(Nx)>epsilon){ double den1 = sqrt(Nx*Nx+Ny*Ny); ax[ind] = -Ny/den1; ay[ind] = Nx/den1; den1 = sqrt(Nx*Nx+Nz*Nz); bx[ind] = - Nz/den1; bz[ind] = Nx/den1; } else if(fabs(Ny)>epsilon){ double den1 = sqrt(Ny*Ny+Nx*Nx); ay[ind] = -Nx/den1; ax[ind] = Ny/den1; den1 = sqrt(Ny*Ny+Nz*Nz); by[ind] = - Nz/den1; bz[ind] = Ny/den1; } else if(fabs(Nz)>epsilon){ double den1 = sqrt(Nz*Nz+Nx*Nx); az[ind] = -Nx/den1; ax[ind] = Nz/den1; den1 = sqrt(Nz*Nz+Ny*Ny); bz[ind] = - Ny/den1; by[ind] = Nz/den1; } } __global__ void surface_redistance_step_backup(double * step, double const * lsf, double const * sign, double const * deltat, double const * ax, double const * ay, double const * az, double const * bx, double const * by, double const * bz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele) { int row_idx = blockIdx.x * blockDim.x + threadIdx.x; int col_idx = blockIdx.y * blockDim.y + threadIdx.y; int pge_idx = blockIdx.z * blockDim.z + threadIdx.z; if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){ return; } int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges); int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges); int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges); int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges); int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges); double_eno_derivative eno_dx = eno_derivative( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], xpr[ind], xpl[ind], dx); //double Dx[2] = {eno_dx.sR, eno_dx.sL}; double xR = eno_dx.sR; double xL = eno_dx.sL; int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges); int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges); int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges); int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges); double_eno_derivative eno_dy = eno_derivative( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], ypf[ind], ypb[ind], dy); //double Dy[2] = {eno_dy.sR, eno_dy.sL}; double yF = eno_dy.sR; double yB = eno_dy.sL; int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges); int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges); int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges); int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges); double_eno_derivative eno_dz = eno_derivative( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], zpu[ind], zpd[ind], dz); //double Dz[2] = {eno_dz.sR, eno_dz.sL}; double zU = eno_dz.sR; double zD = eno_dz.sL; double Ax = ax[ind]; double 
Ay = ay[ind]; double Az = az[ind]; double Bx = bx[ind]; double By = by[ind]; double Bz = bz[ind]; double Sign = sign[ind]; double dt = deltat[ind]; // forward and backward derivatives along a/b directions double Da_f, Da_b, Db_f, Db_b; upwind_derivative(Da_f,Ax,Ay,Az,xR,xL,yF,yB,zU,zD); upwind_derivative(Da_b,-Ax,-Ay,-Az,xR,xL,yF,yB,zU,zD); upwind_derivative(Db_f,Bx,By,Bz,xR,xL,yF,yB,zU,zD); upwind_derivative(Db_b,-Bx,-By,-Bz,xR,xL,yF,yB,zU,zD); double Da = max(max2(Sign*Da_b,0),-min2(Sign*Da_f,0)); double Db = max(max2(Sign*Db_b,0),-min2(Sign*Db_f,0)); step[ind] = dt * Sign * (sqrt(Da*Da + Db*Db) - 1); }
53e3288fa3d11292d9121154fc06fea8f0886069.cu
/******************************************************************************* * serveral useful gpu functions will be defined in this file to facilitate * the surface redistance scheme ******************************************************************************/ typedef struct { double sR; double sL; } double_eno_derivative; __device__ inline double max2(double x, double y) { return (x<y) ? y : x; } __device__ inline double min2(double x, double y) { return (x<y) ? x : y; } __device__ inline double min_mod(double x, double y) { return (x*y<0) ? 0.0 : (fabs(x)<fabs(y) ? x : y); } __device__ inline double max_abs(double x, double y) { return (fabs(x)<fabs(y) ? y : x); } __device__ inline double sign(double x) { return (x>0) ? 1.0 : -1.0; } __device__ inline void upwind_derivative(double & Dr, double Cx, double Cy, double Cz, double xR, double xL, double yF, double yB, double zU, double zD) { Dr = 0; Dr += max2(Cx,0)*xR - min2(Cx,0)*xL; Dr += max2(Cy,0)*yF - min2(Cy,0)*yB; Dr += max2(Cz,0)*zU - min2(Cz,0)*zD; } // convert subindex to linear index // periodic boundary conditions are assumed __device__ inline int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges) { int row_idxn = min2(rows-1, max2(0, row_idx)); int col_idxn = min2(cols-1, max2(0, col_idx)); int pge_idxn = min2(pges-1, max2(0, pge_idx)); int ind = pge_idxn * rows * cols + col_idxn * rows + row_idxn; return ind; } /****************************************************************************** * calculate Eno derivatives at node v0: [v4,v1,v0,v2,v3] ******************************************************************************/ __device__ inline double_eno_derivative eno_derivative( double v4, double v1, double v0, double v2, double v3, double pr, double pl, double ds) { double p2m; double_eno_derivative eno_d; double p2 = v1 - 2.0 * v0 + v2; double p2r = v0 - 2.0 * v2 + v3; p2m = 0.5 * min_mod(p2, p2r) / pow(ds, 2); double vr = (pr==ds) ? v2 : 0; eno_d.sR = (vr - v0) / pr - pr * p2m; double p2l = v0 - 2.0 * v1 + v4; p2m = 0.5 * min_mod(p2, p2l) / pow(ds, 2); double vl = (pl==ds) ? 
v1 : 0; eno_d.sL = (v0 - vl) / pl + pl * p2m; return eno_d; } // calculate surface redistance step // now lsf represents the auxilary level set function(not the level set function) // inputs : the auxilary level set function, sign of the initial level set function, distance to the interface, normal vectors __global__ void surface_redistance_step(double * step, double const * lsf, double const * sign, double const * deltat, double const * nx, double const * ny, double const * nz, double const * ax, double const * ay, double const * az, double const * bx, double const * by, double const * bz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele) { int row_idx = blockIdx.x * blockDim.x + threadIdx.x; int col_idx = blockIdx.y * blockDim.y + threadIdx.y; int pge_idx = blockIdx.z * blockDim.z + threadIdx.z; if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){ return; } int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges); int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges); int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges); int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges); int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges); double_eno_derivative eno_dx = eno_derivative( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], xpr[ind], xpl[ind], dx); //double Dx[2] = {eno_dx.sR, eno_dx.sL}; double xR = eno_dx.sR; double xL = eno_dx.sL; int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges); int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges); int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges); int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges); double_eno_derivative eno_dy = eno_derivative( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], ypf[ind], ypb[ind], dy); //double Dy[2] = {eno_dy.sR, eno_dy.sL}; double yF = eno_dy.sR; double yB = eno_dy.sL; int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges); int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges); int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges); int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges); double_eno_derivative eno_dz = eno_derivative( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], zpu[ind], zpd[ind], dz); //double Dz[2] = {eno_dz.sR, eno_dz.sL}; double zU = eno_dz.sR; double zD = eno_dz.sL; double Nx = nx[ind]; double Ny = ny[ind]; double Nz = nz[ind]; double Ax = ax[ind]; double Ay = ay[ind]; double Az = az[ind]; double Bx = bx[ind]; double By = by[ind]; double Bz = bz[ind]; double Sign = sign[ind]; double dt = deltat[ind]; // forward and backward derivatives along a/b directions double Da_f, Da_b, Db_f, Db_b; upwind_derivative(Da_f,Ax,Ay,Az,xR,xL,yF,yB,zU,zD); upwind_derivative(Da_b,-Ax,-Ay,-Az,xR,xL,yF,yB,zU,zD); upwind_derivative(Db_f,Bx,By,Bz,xR,xL,yF,yB,zU,zD); upwind_derivative(Db_b,-Bx,-By,-Bz,xR,xL,yF,yB,zU,zD); // choice of one-sided derivatives in the surface coordiante double Da = max_abs(max2(Sign*Da_b,0),min2(Sign*Da_f,0)); double Db = max_abs(max2(Sign*Db_b,0),min2(Sign*Db_f,0)); double den = sqrt(Da*Da+Db*Db); // direction of information double H1 = (Da * Ax + Db * Bx) / den; double H2 = (Da * Ay + Db * By) / den; double H3 = (Da * Az + Db * Bz) / den; // select the right derivatives double dr_x = (H1>0) ? xL : ( (H1<0) ? xR : 0 ); double dr_y = (H2>0) ? 
yB : ( (H2<0) ? yF : 0 ); double dr_z = (H3>0) ? zD : ( (H3<0) ? zU : 0 ); double dr_n = Nx * dr_x + Ny * dr_y + Nz * dr_z; step[ind] = dt * Sign * (sqrt( dr_x*dr_x + dr_y*dr_y + dr_z*dr_z - dr_n*dr_n) - 1); } // construct a local coordinate system at each point __global__ void surface_coordinate(double * ax, double * ay, double * az, double * bx, double * by, double * bz, double const * nx, double const * ny, double const * nz, int rows, int cols, int pges) { int row_idx = blockIdx.x * blockDim.x + threadIdx.x; int col_idx = blockIdx.y * blockDim.y + threadIdx.y; int pge_idx = blockIdx.z * blockDim.z + threadIdx.z; if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){ return; } int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges); double Nx = nx[ind]; double Ny = ny[ind]; double Nz = nz[ind]; double epsilon = 1e-6; if(fabs(Nx)>epsilon){ double den1 = sqrt(Nx*Nx+Ny*Ny); ax[ind] = -Ny/den1; ay[ind] = Nx/den1; den1 = sqrt(Nx*Nx+Nz*Nz); bx[ind] = - Nz/den1; bz[ind] = Nx/den1; } else if(fabs(Ny)>epsilon){ double den1 = sqrt(Ny*Ny+Nx*Nx); ay[ind] = -Nx/den1; ax[ind] = Ny/den1; den1 = sqrt(Ny*Ny+Nz*Nz); by[ind] = - Nz/den1; bz[ind] = Ny/den1; } else if(fabs(Nz)>epsilon){ double den1 = sqrt(Nz*Nz+Nx*Nx); az[ind] = -Nx/den1; ax[ind] = Nz/den1; den1 = sqrt(Nz*Nz+Ny*Ny); bz[ind] = - Ny/den1; by[ind] = Nz/den1; } } __global__ void surface_redistance_step_backup(double * step, double const * lsf, double const * sign, double const * deltat, double const * ax, double const * ay, double const * az, double const * bx, double const * by, double const * bz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele) { int row_idx = blockIdx.x * blockDim.x + threadIdx.x; int col_idx = blockIdx.y * blockDim.y + threadIdx.y; int pge_idx = blockIdx.z * blockDim.z + threadIdx.z; if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){ return; } int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges); int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges); int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges); int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges); int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges); double_eno_derivative eno_dx = eno_derivative( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], xpr[ind], xpl[ind], dx); //double Dx[2] = {eno_dx.sR, eno_dx.sL}; double xR = eno_dx.sR; double xL = eno_dx.sL; int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges); int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges); int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges); int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges); double_eno_derivative eno_dy = eno_derivative( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], ypf[ind], ypb[ind], dy); //double Dy[2] = {eno_dy.sR, eno_dy.sL}; double yF = eno_dy.sR; double yB = eno_dy.sL; int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges); int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges); int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges); int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges); double_eno_derivative eno_dz = eno_derivative( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], zpu[ind], zpd[ind], dz); //double Dz[2] = {eno_dz.sR, eno_dz.sL}; double zU = eno_dz.sR; double zD = eno_dz.sL; double Ax = ax[ind]; double 
Ay = ay[ind]; double Az = az[ind]; double Bx = bx[ind]; double By = by[ind]; double Bz = bz[ind]; double Sign = sign[ind]; double dt = deltat[ind]; // forward and backward derivatives along a/b directions double Da_f, Da_b, Db_f, Db_b; upwind_derivative(Da_f,Ax,Ay,Az,xR,xL,yF,yB,zU,zD); upwind_derivative(Da_b,-Ax,-Ay,-Az,xR,xL,yF,yB,zU,zD); upwind_derivative(Db_f,Bx,By,Bz,xR,xL,yF,yB,zU,zD); upwind_derivative(Db_b,-Bx,-By,-Bz,xR,xL,yF,yB,zU,zD); double Da = max(max2(Sign*Da_b,0),-min2(Sign*Da_f,0)); double Db = max(max2(Sign*Db_b,0),-min2(Sign*Db_f,0)); step[ind] = dt * Sign * (sqrt(Da*Da + Db*Db) - 1); }
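One indexing detail that is easy to miss in the flattened listing above: sub2ind stores the grid page-major and clamps out-of-range indices into the domain, so despite its "periodic boundary conditions are assumed" comment the effective boundary treatment is replicate-at-boundary, not a periodic wrap. A host-side restatement of the same mapping (a hypothetical helper, not part of the original pair):

// Equivalent of the device sub2ind above: page-major layout with indices
// clamped to [0, dim-1] (replicated boundary values, not periodic wrap-around).
static inline int sub2ind_host(int row, int col, int pge, int rows, int cols, int pges) {
    row = row < 0 ? 0 : (row > rows - 1 ? rows - 1 : row);
    col = col < 0 ? 0 : (col > cols - 1 ? cols - 1 : col);
    pge = pge < 0 ? 0 : (pge > pges - 1 ? pges - 1 : pge);
    return pge * rows * cols + col * rows + row;
}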
ab38b13907503a4e09a36b9953f5bfdc343dfd2d.hip
// !!! This is a file automatically generated by hipify!!!
// moveArrays.cu
// ref: http://www.drdobbs.com/parallel/cuda-supercomputing-for-the-masses-part/207200659?pgno=2
// demonstrates CUDA interface to data allocation on device (GPU)
// and data movement between host (CPU) and device.
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
int main(void)
{
    float *a_h, *b_h; // pointers to host memory
    float *a_d, *b_d; // pointers to device memory
    int N = 14;
    int i;
    // allocate arrays on host
    a_h = (float *)malloc(sizeof(float)*N);
    b_h = (float *)malloc(sizeof(float)*N);
    // allocate arrays on device
    hipMalloc((void **) &a_d, sizeof(float)*N);
    hipMalloc((void **) &b_d, sizeof(float)*N);
    // initialize host data
    for (i=0; i<N; i++) {
        a_h[i] = 10.f+i;
        b_h[i] = 0.f;
    }
    // send data from host to device: a_h to a_d
    hipMemcpy(a_d, a_h, sizeof(float)*N, hipMemcpyHostToDevice);
    // copy data within device: a_d to b_d
    hipMemcpy(b_d, a_d, sizeof(float)*N, hipMemcpyDeviceToDevice);
    // retrieve data from device: b_d to b_h
    hipMemcpy(b_h, b_d, sizeof(float)*N, hipMemcpyDeviceToHost);
    // check result
    for (i=0; i<N; i++)
        assert(a_h[i] == b_h[i]);
    // cleanup
    free(a_h);
    free(b_h);
    hipFree(a_d);
    hipFree(b_d);
}
ab38b13907503a4e09a36b9953f5bfdc343dfd2d.cu
// moveArrays.cu
// ref: http://www.drdobbs.com/parallel/cuda-supercomputing-for-the-masses-part/207200659?pgno=2
// demonstrates CUDA interface to data allocation on device (GPU)
// and data movement between host (CPU) and device.
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
int main(void)
{
    float *a_h, *b_h; // pointers to host memory
    float *a_d, *b_d; // pointers to device memory
    int N = 14;
    int i;
    // allocate arrays on host
    a_h = (float *)malloc(sizeof(float)*N);
    b_h = (float *)malloc(sizeof(float)*N);
    // allocate arrays on device
    cudaMalloc((void **) &a_d, sizeof(float)*N);
    cudaMalloc((void **) &b_d, sizeof(float)*N);
    // initialize host data
    for (i=0; i<N; i++) {
        a_h[i] = 10.f+i;
        b_h[i] = 0.f;
    }
    // send data from host to device: a_h to a_d
    cudaMemcpy(a_d, a_h, sizeof(float)*N, cudaMemcpyHostToDevice);
    // copy data within device: a_d to b_d
    cudaMemcpy(b_d, a_d, sizeof(float)*N, cudaMemcpyDeviceToDevice);
    // retrieve data from device: b_d to b_h
    cudaMemcpy(b_h, b_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
    // check result
    for (i=0; i<N; i++)
        assert(a_h[i] == b_h[i]);
    // cleanup
    free(a_h);
    free(b_h);
    cudaFree(a_d);
    cudaFree(b_d);
}
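The moveArrays pair above performs no error checking, so a failed allocation or copy would surface only as a failed assert on the host data. A minimal checking wrapper, written here for the CUDA side with standard runtime calls (the HIP version is identical with hip-prefixed names; the macro name CHECK is a hypothetical choice, not from the original files):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical helper: abort with a readable message if a runtime call fails.
#define CHECK(call)                                                   \
  do {                                                                \
    cudaError_t err_ = (call);                                        \
    if (err_ != cudaSuccess) {                                        \
      fprintf(stderr, "CUDA error '%s' at %s:%d\n",                   \
              cudaGetErrorString(err_), __FILE__, __LINE__);          \
      exit(EXIT_FAILURE);                                             \
    }                                                                 \
  } while (0)

// Example use on the copies above:
// CHECK(cudaMemcpy(a_d, a_h, sizeof(float)*N, cudaMemcpyHostToDevice));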
c41a9509e7a28bc821154d44aedb7935fbb8fccb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<bits/stdc++.h> using namespace std; #define BLOCK_SIZE 256 __global__ void pegasos_per_thread(int num_samples, int num_features, double * W, double * X, double * Y, double lambda, int num_iters, double * random_arr, int k) { int index = blockIdx.x * blockDim.x + threadIdx.x; int n_samples_per_thread = num_samples / k; for(int sample=0; sample<n_samples_per_thread; sample++) { double lr = 1.0 / (lambda * (sample+1)); double pred_output = 0; for (int i=0; i<num_features; i++) pred_output += W[index * num_features + i] * X[(n_samples_per_thread * index + sample) * num_features + i]; if (Y[n_samples_per_thread * index + sample] * pred_output >= 1.0) { for (int i=0; i<num_features; i++) W[index * num_features + i] = (1.0 - lr * lambda) * W[index * num_features + i]; } else { for (int i=0; i<num_features; i++) W[index * num_features + i] = (1.0 - lr * lambda) * W[index * num_features + i] + (lr * Y[n_samples_per_thread * index + sample]) * X[(n_samples_per_thread * index + sample) * num_features + i]; } } } int main() { srand(time(NULL)); ifstream trainfile ("train.txt"); ifstream labelfile ("labels.txt"); int n_samples=20000; int n_features=500; int k = 100; int numBlocks = (k + BLOCK_SIZE - 1) / BLOCK_SIZE; int num_iters = 10; double lambda = 1.0; int n_samples_per_thread = n_samples / k; double *W, *X, *Y, *final_W, *random_arr; double *d_W, *d_X, *d_Y, *d_random_arr; W = (double *) malloc(k * n_features * sizeof(double)); final_W = (double *) malloc(n_features * sizeof(double)); X = (double *) malloc(n_samples * n_features * sizeof(double)); Y = (double *) malloc(n_samples * sizeof(double)); random_arr = (double *) malloc(num_iters * sizeof(double)); hipMalloc(&d_W, k * n_features * sizeof(double)); hipMalloc(&d_X, n_samples * n_features * sizeof(double)); hipMalloc(&d_Y, n_samples * sizeof(double)); hipMalloc(&d_random_arr, num_iters * sizeof(double)); for (int i=0;i<n_samples;i++) { for (int j=0;j<n_features;j++) trainfile >> X[i*n_features + j]; } for (int i=0;i<n_samples;i++) { labelfile >> Y[i]; if (Y[i] == 0) { Y[i] = -1; } } for (int i=0;i<k;i++) { for (int j=0;j<n_features;j++) W[i * n_features + j] = 0; } for (int i=0;i<n_features;i++) final_W[i] = 0; for (int i=0;i<num_iters;i++) random_arr[i] = rand() % n_samples_per_thread; hipMemcpy(d_X, X, n_samples * n_features * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_W, W, k * n_features * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_Y, Y, n_samples * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_random_arr, random_arr, num_iters * sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( pegasos_per_thread), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, n_samples, n_features, d_W, d_X, d_Y, lambda, num_iters, d_random_arr, k); hipMemcpy(W, d_W, k * n_features * sizeof(double), hipMemcpyDeviceToHost); // for (int i=0;i<k;i++) // for (int j=0;j<n_features;j++) // cout << W[i * n_features + j] << " "; for (int i=0;i<k;i++) { for (int j=0;j<n_features;j++) final_W[j] += (W[i * n_features + j]); } for (int i=0;i<n_features;i++) final_W[i] /= k; // cout << "\nFinalW\n"; // for (int i=0;i<n_features;i++) {cout << final_W[i] << " ";} double correct = 0.0; for (int i=0;i<n_samples;i++) { double val = 0.0; for (int j=0;j<n_features;j++) val += final_W[j] * X[i * n_features + j]; if (val * Y[i] >= 0) correct += 1; } cout << "Correct " << correct << endl; printf("Accuracy %lf\n", correct / n_samples); return 0; }
c41a9509e7a28bc821154d44aedb7935fbb8fccb.cu
#include<bits/stdc++.h> using namespace std; #define BLOCK_SIZE 256 __global__ void pegasos_per_thread(int num_samples, int num_features, double * W, double * X, double * Y, double lambda, int num_iters, double * random_arr, int k) { int index = blockIdx.x * blockDim.x + threadIdx.x; int n_samples_per_thread = num_samples / k; for(int sample=0; sample<n_samples_per_thread; sample++) { double lr = 1.0 / (lambda * (sample+1)); double pred_output = 0; for (int i=0; i<num_features; i++) pred_output += W[index * num_features + i] * X[(n_samples_per_thread * index + sample) * num_features + i]; if (Y[n_samples_per_thread * index + sample] * pred_output >= 1.0) { for (int i=0; i<num_features; i++) W[index * num_features + i] = (1.0 - lr * lambda) * W[index * num_features + i]; } else { for (int i=0; i<num_features; i++) W[index * num_features + i] = (1.0 - lr * lambda) * W[index * num_features + i] + (lr * Y[n_samples_per_thread * index + sample]) * X[(n_samples_per_thread * index + sample) * num_features + i]; } } } int main() { srand(time(NULL)); ifstream trainfile ("train.txt"); ifstream labelfile ("labels.txt"); int n_samples=20000; int n_features=500; int k = 100; int numBlocks = (k + BLOCK_SIZE - 1) / BLOCK_SIZE; int num_iters = 10; double lambda = 1.0; int n_samples_per_thread = n_samples / k; double *W, *X, *Y, *final_W, *random_arr; double *d_W, *d_X, *d_Y, *d_random_arr; W = (double *) malloc(k * n_features * sizeof(double)); final_W = (double *) malloc(n_features * sizeof(double)); X = (double *) malloc(n_samples * n_features * sizeof(double)); Y = (double *) malloc(n_samples * sizeof(double)); random_arr = (double *) malloc(num_iters * sizeof(double)); cudaMalloc(&d_W, k * n_features * sizeof(double)); cudaMalloc(&d_X, n_samples * n_features * sizeof(double)); cudaMalloc(&d_Y, n_samples * sizeof(double)); cudaMalloc(&d_random_arr, num_iters * sizeof(double)); for (int i=0;i<n_samples;i++) { for (int j=0;j<n_features;j++) trainfile >> X[i*n_features + j]; } for (int i=0;i<n_samples;i++) { labelfile >> Y[i]; if (Y[i] == 0) { Y[i] = -1; } } for (int i=0;i<k;i++) { for (int j=0;j<n_features;j++) W[i * n_features + j] = 0; } for (int i=0;i<n_features;i++) final_W[i] = 0; for (int i=0;i<num_iters;i++) random_arr[i] = rand() % n_samples_per_thread; cudaMemcpy(d_X, X, n_samples * n_features * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_W, W, k * n_features * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_Y, Y, n_samples * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_random_arr, random_arr, num_iters * sizeof(double), cudaMemcpyHostToDevice); pegasos_per_thread<<<numBlocks, BLOCK_SIZE>>>(n_samples, n_features, d_W, d_X, d_Y, lambda, num_iters, d_random_arr, k); cudaMemcpy(W, d_W, k * n_features * sizeof(double), cudaMemcpyDeviceToHost); // for (int i=0;i<k;i++) // for (int j=0;j<n_features;j++) // cout << W[i * n_features + j] << " "; for (int i=0;i<k;i++) { for (int j=0;j<n_features;j++) final_W[j] += (W[i * n_features + j]); } for (int i=0;i<n_features;i++) final_W[i] /= k; // cout << "\nFinalW\n"; // for (int i=0;i<n_features;i++) {cout << final_W[i] << " ";} double correct = 0.0; for (int i=0;i<n_samples;i++) { double val = 0.0; for (int j=0;j<n_features;j++) val += final_W[j] * X[i * n_features + j]; if (val * Y[i] >= 0) correct += 1; } cout << "Correct " << correct << endl; printf("Accuracy %lf\n", correct / n_samples); return 0; }
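One detail worth flagging in the Pegasos pair above: the launch uses numBlocks = (k + BLOCK_SIZE - 1) / BLOCK_SIZE with BLOCK_SIZE = 256 but only k = 100 logical workers, and the kernel has no bounds check, so threads with index >= k in the single 256-wide block read and write past the allocated W and X. A guarded variant would start like the sketch below (hypothetical name; the body would be the same per-worker update as pegasos_per_thread):

__global__ void pegasos_per_thread_guarded(int num_samples, int num_features,
                                           double *W, double *X, double *Y,
                                           double lambda, int num_iters,
                                           double *random_arr, int k)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= k) return;  // surplus threads of the rounded-up launch exit here
    // ... same per-worker Pegasos update as pegasos_per_thread above ...
}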
28c2b4aeb0d3117e50ef7e66a5afb3237f0e459a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Ben Bennett // CIS 531 // CUDA version of the flocking algorithm #include <iostream> #include <math.h> #include <chrono> #include <thread> #include <string.h> #include<X11/Xlib.h> #include<X11/XKBlib.h> #include<GL/glx.h> #include<GL/glu.h> #include "Vector2.cuh" #include "../common/WindowManager.h" XWindowAttributes wa; // Number of boids (optional command line arg) int count = 100; // Half the width of the world (-width to width, with 0,0 in the center) int width = 30; // Speed of the boids GLfloat speed = 3; // Local radius for each boid (optional command line arg) GLfloat radius = 1.0; // Weights for separation, cohesion, alignment, and destination (optional command line args) float sepWeight = 1.0; float cohWeight = 1.0; float aliWeight = 1.0; float destWeight = 0.01; // This weight must be small to work well // Running time of the simulation (optional command line arg) float runtime = 99999.0; // Draw flag (optional command line arg) bool draw = true; int deviceIndex = 0; Vector2 center = Vector2(0.0,0.0); // Position and direction arrays Vector2* position; Vector2* direction; Vector2* tempPos; Vector2* tempDir; Vector2* cu_pos; Vector2* cu_dir; Vector2* cu_tempPos; Vector2* cu_tempDir; // Copy array utility function void copy(Vector2 from[], Vector2 to[], int length) { for(int i = 0; i < length; i++) { to[i] = from[i]; } } // Draw a red triangle void drawTriangle(GLfloat size) { glBegin(GL_TRIANGLES); glColor3f(204./255.,0.0,0.0); glVertex3f(-size/2.,0.0,0.0); glVertex3f(size/2.,0.0,0.0); glVertex3f(0.0,size,0.0); glEnd(); } // Draw all the boids. This could be improved using instancing void drawBoids(WindowManager* mgr) { float aspect_ratio; XGetWindowAttributes(mgr->getDisplay(), mgr->getWindow(), &wa); glViewport(0, 0, wa.width, wa.height); aspect_ratio = (float)(wa.width) / (float)(wa.height); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(-width*aspect_ratio, width*aspect_ratio, -width, width, 1., 100.); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); gluLookAt(0., 0., -10, 0., 0., 0., 0., 1., 0.); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); for(int i = 0; i < count; i++) { glPushMatrix(); glTranslatef(position[i].x, position[i].y, 0.0); drawTriangle(0.20f); glPopMatrix(); } glXSwapBuffers(mgr->getDisplay(), mgr->getWindow()); } // Process the command line args void processArgs(int argc, char* argv[]) { if(argc > 1) count = atoi(argv[1]); if(argc > 2) runtime = std::stof(argv[2]); if(argc > 3) draw = (bool)atoi(argv[3]); if(argc > 4) radius = std::stof(argv[4]); if(argc > 5) sepWeight = std::stof(argv[5]); if(argc > 6) cohWeight = std::stof(argv[6]); if(argc > 7) aliWeight = std::stof(argv[7]); if(argc > 8) destWeight = std::stof(argv[8]); if(argc > 9) deviceIndex = atoi(argv[9]); } // Initialize the position and direction of each boid randomly void init() { srand(time(NULL)); position = new Vector2[count]; direction = new Vector2[count]; tempPos = new Vector2[count]; tempDir = new Vector2[count]; // Randomize starting position and direction for(int i=0; i < count; i++) { position[i] = Vector2((GLfloat)rand()/(GLfloat)RAND_MAX*2.0*width - width, (GLfloat)rand()/(GLfloat)RAND_MAX*2.0*width - width); GLfloat angle = (GLfloat)rand()/(GLfloat)RAND_MAX * M_PI*2.0; direction[i] = Vector2(cos(angle), sin(angle)); tempPos[i] = position[i]; tempDir[i] = direction[i]; } } __device__ // Calculate the separation, cohesion, and alignment vectors void calculateProperties(Vector2& sep, 
Vector2& coh, Vector2& ali, int index, int c, float radius, Vector2* cu_pos, Vector2* cu_dir) { int num = 0; for(int i=0; i < c; i++) { if(i != index) { Vector2 diff = cu_pos[index]-cu_pos[i]; if(diff.magnitudeSquared() < radius*radius) { float d = diff.magnitude(); num++; ali = ali + cu_dir[i]; coh = coh + cu_pos[i]; sep = sep + ((diff)*(1.0/d)); } } } if(num > 0) { float z = 1.0/(float)num; ali = ali*z; coh = (coh*z)-cu_pos[index]; sep = sep*z; } } // CUDA kernel. Updates the position and direction for a single boid. Updates based on the rules for // cohesion, separation, and alignment, as well as a fourth rule to keep in screen __global__ void updateBoids(float deltaTime, float speed, float sepWeight, float cohWeight, float aliWeight, float destWeight, float radius, int c, Vector2* cu_pos, Vector2* cu_dir, Vector2* cu_tempPos, Vector2* cu_tempDir) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < c) { Vector2 sep; Vector2 coh; Vector2 ali; calculateProperties(sep, coh, ali, i, c, radius, cu_pos, cu_dir); Vector2 dest = Vector2(0.0,0.0)-cu_pos[i]; Vector2 dir = (((sep * sepWeight) + (coh * cohWeight) + (ali * aliWeight) + (dest * destWeight)) * .25); if (dir.x != 0.0 && dir.y != 0.0) cu_tempDir[i] = dir.normalize(); cu_tempPos[i] = cu_tempPos[i] + cu_tempDir[i] * speed * deltaTime; } } int main(int argc, char* argv[]) { processArgs(argc, argv); std::cout << "Count:" << count << " RunTime:" << runtime << " Draw:" << draw << " Radius:" << radius << " SeparationWeight:" << sepWeight << " CohesionWeight:" << cohWeight << " AlignmentWeight:" << aliWeight << " DestinationWeight:" << destWeight << " DeviceIndex: " << deviceIndex << std::endl; WindowManager mgr; mgr.createWindow(); hipSetDevice(deviceIndex); hipDeviceProp_t props; int device = 0; hipGetDevice(&device); hipGetDeviceProperties(&props, device); std::cout << "Using device " << device << ": " << props.name << std::endl; // Boid initialization init(); // Allocate memory on the GPU hipMalloc(&cu_dir, sizeof(Vector2)*count); hipMalloc(&cu_pos, sizeof(Vector2)*count); hipMalloc(&cu_tempDir, sizeof(Vector2)*count); hipMalloc(&cu_tempPos, sizeof(Vector2)*count); // Copy the position and direction values to the GPU hipMemcpy(cu_tempDir, tempDir, sizeof(Vector2)*count, hipMemcpyHostToDevice); hipMemcpy(cu_tempPos, tempPos, sizeof(Vector2)*count, hipMemcpyHostToDevice); // Setup timer float deltaTime = 0.0; float elapsedTime = 0.0; int frameCount = 0; std::chrono::high_resolution_clock::time_point start, end; // Run until the user quits or the designated runtime is reached while(elapsedTime < runtime && !mgr.quit()) { start = std::chrono::high_resolution_clock::now(); // Copy the latest position and direction data to the GPU hipMemcpy(cu_dir, direction, sizeof(Vector2)*count, hipMemcpyHostToDevice); hipMemcpy(cu_pos, position, sizeof(Vector2)*count, hipMemcpyHostToDevice); // Update the position and direction on the GPU hipLaunchKernelGGL(( updateBoids), dim3((count+255)/256), dim3(256), 0, 0, deltaTime, speed, sepWeight, cohWeight, aliWeight, destWeight, radius, count, cu_pos, cu_dir, cu_tempPos, cu_tempDir); // Get the updated position and direction for drawing hipMemcpy(position, cu_tempPos, sizeof(Vector2)*count, hipMemcpyDeviceToHost); hipMemcpy(direction, cu_tempDir, sizeof(Vector2)*count, hipMemcpyDeviceToHost); if(draw) drawBoids(&mgr); end = std::chrono::high_resolution_clock::now(); elapsedTime += deltaTime; frameCount++; deltaTime = std::chrono::duration_cast<std::chrono::duration<float>>(end-start).count(); } // Print the 
frames per second std::cout << "Average FPS: " << frameCount/elapsedTime << std::endl; hipFree(cu_dir); hipFree(cu_pos); hipFree(cu_tempDir); hipFree(cu_tempPos); mgr.shutdown(); return 0; }
28c2b4aeb0d3117e50ef7e66a5afb3237f0e459a.cu
// Ben Bennett // CIS 531 // CUDA version of the flocking algorithm #include <iostream> #include <math.h> #include <chrono> #include <thread> #include <string.h> #include<X11/Xlib.h> #include<X11/XKBlib.h> #include<GL/glx.h> #include<GL/glu.h> #include "Vector2.cuh" #include "../common/WindowManager.h" XWindowAttributes wa; // Number of boids (optional command line arg) int count = 100; // Half the width of the world (-width to width, with 0,0 in the center) int width = 30; // Speed of the boids GLfloat speed = 3; // Local radius for each boid (optional command line arg) GLfloat radius = 1.0; // Weights for separation, cohesion, alignment, and destination (optional command line args) float sepWeight = 1.0; float cohWeight = 1.0; float aliWeight = 1.0; float destWeight = 0.01; // This weight must be small to work well // Running time of the simulation (optional command line arg) float runtime = 99999.0; // Draw flag (optional command line arg) bool draw = true; int deviceIndex = 0; Vector2 center = Vector2(0.0,0.0); // Position and direction arrays Vector2* position; Vector2* direction; Vector2* tempPos; Vector2* tempDir; Vector2* cu_pos; Vector2* cu_dir; Vector2* cu_tempPos; Vector2* cu_tempDir; // Copy array utility function void copy(Vector2 from[], Vector2 to[], int length) { for(int i = 0; i < length; i++) { to[i] = from[i]; } } // Draw a red triangle void drawTriangle(GLfloat size) { glBegin(GL_TRIANGLES); glColor3f(204./255.,0.0,0.0); glVertex3f(-size/2.,0.0,0.0); glVertex3f(size/2.,0.0,0.0); glVertex3f(0.0,size,0.0); glEnd(); } // Draw all the boids. This could be improved using instancing void drawBoids(WindowManager* mgr) { float aspect_ratio; XGetWindowAttributes(mgr->getDisplay(), mgr->getWindow(), &wa); glViewport(0, 0, wa.width, wa.height); aspect_ratio = (float)(wa.width) / (float)(wa.height); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(-width*aspect_ratio, width*aspect_ratio, -width, width, 1., 100.); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); gluLookAt(0., 0., -10, 0., 0., 0., 0., 1., 0.); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); for(int i = 0; i < count; i++) { glPushMatrix(); glTranslatef(position[i].x, position[i].y, 0.0); drawTriangle(0.20f); glPopMatrix(); } glXSwapBuffers(mgr->getDisplay(), mgr->getWindow()); } // Process the command line args void processArgs(int argc, char* argv[]) { if(argc > 1) count = atoi(argv[1]); if(argc > 2) runtime = std::stof(argv[2]); if(argc > 3) draw = (bool)atoi(argv[3]); if(argc > 4) radius = std::stof(argv[4]); if(argc > 5) sepWeight = std::stof(argv[5]); if(argc > 6) cohWeight = std::stof(argv[6]); if(argc > 7) aliWeight = std::stof(argv[7]); if(argc > 8) destWeight = std::stof(argv[8]); if(argc > 9) deviceIndex = atoi(argv[9]); } // Initialize the position and direction of each boid randomly void init() { srand(time(NULL)); position = new Vector2[count]; direction = new Vector2[count]; tempPos = new Vector2[count]; tempDir = new Vector2[count]; // Randomize starting position and direction for(int i=0; i < count; i++) { position[i] = Vector2((GLfloat)rand()/(GLfloat)RAND_MAX*2.0*width - width, (GLfloat)rand()/(GLfloat)RAND_MAX*2.0*width - width); GLfloat angle = (GLfloat)rand()/(GLfloat)RAND_MAX * M_PI*2.0; direction[i] = Vector2(cos(angle), sin(angle)); tempPos[i] = position[i]; tempDir[i] = direction[i]; } } __device__ // Calculate the separation, cohesion, and alignment vectors void calculateProperties(Vector2& sep, Vector2& coh, Vector2& ali, int index, int c, float radius, Vector2* cu_pos, Vector2* 
cu_dir) { int num = 0; for(int i=0; i < c; i++) { if(i != index) { Vector2 diff = cu_pos[index]-cu_pos[i]; if(diff.magnitudeSquared() < radius*radius) { float d = diff.magnitude(); num++; ali = ali + cu_dir[i]; coh = coh + cu_pos[i]; sep = sep + ((diff)*(1.0/d)); } } } if(num > 0) { float z = 1.0/(float)num; ali = ali*z; coh = (coh*z)-cu_pos[index]; sep = sep*z; } } // CUDA kernel. Updates the position and direction for a single boid. Updates based on the rules for // cohesion, separation, and alignment, as well as a fourth rule to keep in screen __global__ void updateBoids(float deltaTime, float speed, float sepWeight, float cohWeight, float aliWeight, float destWeight, float radius, int c, Vector2* cu_pos, Vector2* cu_dir, Vector2* cu_tempPos, Vector2* cu_tempDir) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < c) { Vector2 sep; Vector2 coh; Vector2 ali; calculateProperties(sep, coh, ali, i, c, radius, cu_pos, cu_dir); Vector2 dest = Vector2(0.0,0.0)-cu_pos[i]; Vector2 dir = (((sep * sepWeight) + (coh * cohWeight) + (ali * aliWeight) + (dest * destWeight)) * .25); if (dir.x != 0.0 && dir.y != 0.0) cu_tempDir[i] = dir.normalize(); cu_tempPos[i] = cu_tempPos[i] + cu_tempDir[i] * speed * deltaTime; } } int main(int argc, char* argv[]) { processArgs(argc, argv); std::cout << "Count:" << count << " RunTime:" << runtime << " Draw:" << draw << " Radius:" << radius << " SeparationWeight:" << sepWeight << " CohesionWeight:" << cohWeight << " AlignmentWeight:" << aliWeight << " DestinationWeight:" << destWeight << " DeviceIndex: " << deviceIndex << std::endl; WindowManager mgr; mgr.createWindow(); cudaSetDevice(deviceIndex); cudaDeviceProp props; int device = 0; cudaGetDevice(&device); cudaGetDeviceProperties(&props, device); std::cout << "Using device " << device << ": " << props.name << std::endl; // Boid initialization init(); // Allocate memory on the GPU cudaMalloc(&cu_dir, sizeof(Vector2)*count); cudaMalloc(&cu_pos, sizeof(Vector2)*count); cudaMalloc(&cu_tempDir, sizeof(Vector2)*count); cudaMalloc(&cu_tempPos, sizeof(Vector2)*count); // Copy the position and direction values to the GPU cudaMemcpy(cu_tempDir, tempDir, sizeof(Vector2)*count, cudaMemcpyHostToDevice); cudaMemcpy(cu_tempPos, tempPos, sizeof(Vector2)*count, cudaMemcpyHostToDevice); // Setup timer float deltaTime = 0.0; float elapsedTime = 0.0; int frameCount = 0; std::chrono::high_resolution_clock::time_point start, end; // Run until the user quits or the designated runtime is reached while(elapsedTime < runtime && !mgr.quit()) { start = std::chrono::high_resolution_clock::now(); // Copy the latest position and direction data to the GPU cudaMemcpy(cu_dir, direction, sizeof(Vector2)*count, cudaMemcpyHostToDevice); cudaMemcpy(cu_pos, position, sizeof(Vector2)*count, cudaMemcpyHostToDevice); // Update the position and direction on the GPU updateBoids<<<(count+255)/256, 256>>>(deltaTime, speed, sepWeight, cohWeight, aliWeight, destWeight, radius, count, cu_pos, cu_dir, cu_tempPos, cu_tempDir); // Get the updated position and direction for drawing cudaMemcpy(position, cu_tempPos, sizeof(Vector2)*count, cudaMemcpyDeviceToHost); cudaMemcpy(direction, cu_tempDir, sizeof(Vector2)*count, cudaMemcpyDeviceToHost); if(draw) drawBoids(&mgr); end = std::chrono::high_resolution_clock::now(); elapsedTime += deltaTime; frameCount++; deltaTime = std::chrono::duration_cast<std::chrono::duration<float>>(end-start).count(); } // Print the frames per second std::cout << "Average FPS: " << frameCount/elapsedTime << std::endl; cudaFree(cu_dir); 
cudaFree(cu_pos); cudaFree(cu_tempDir); cudaFree(cu_tempPos); mgr.shutdown(); return 0; }
70f8fb7004e039d0ed39da4278f452ec5fe3ac5c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "luaT.h" #include "THH/THH.h" #include "arithmetic.h" /* __global__ void batch_crop_kernel(float* input, const int nCropRows, const int nCropCols, const int iH, const int iW, const int nPlanes){ const int plane = blockIdx.x; if (plane >= nPlanes) return; input += plane * iH * iW; const int tx = threadIdx.x; const int ty = threadIdx.y; if (ty < iH && (ty > iH-nCropRows-1 || ty < nCropRows)) { input[ty*iW + tx] = 0; } if (tx < iW && (tx > iW-nCropCols-1 || tx < nCropCols)) { input[ty*iW + tx] = 0; } } */ __global__ void batch_crop_kernel(float* input, const int nCropRows, const int nCropCols, const int iH, const int iW, const int nPlanes){ const int plane = blockIdx.x; if (plane >= nPlanes) return; input += plane * iH * iW; const int tx = threadIdx.x; const int ty = threadIdx.y; const int tz = threadIdx.z; // top if (tz == 0) { input[ty*iW + tx] = 0; } // bottom if (tz == 1) { input[(iH-ty-1)*iW + tx] = 0; } // left if (tz == 2) { input[tx*iW+ty] = 0; } // right if (tz == 3) { input[tx*iW + (iW-ty-1)] = 0; } /* if (ty < iH && (ty > iH-nCropRows-1 || ty < nCropRows)) { input[ty*iW + tx] = 0; } if (tx < iW && (tx > iW-nCropCols-1 || tx < nCropCols)) { input[ty*iW + tx] = 0; } */ } // we are assuming the input is real, not complex static int crop_zeroborders(lua_State *L) { THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); const int nCropRows = luaL_checknumber(L,2); const int nCropCols = luaL_checknumber(L,3); const int dim = input->nDimension; const int iH = input->size[dim-2]; const int iW = input->size[dim-1]; long nPlanes, nInputPlanes, nOutputPlanes; bool resize = false; if (dim == 4) { resize = true; nOutputPlanes = input->size[0]; nInputPlanes = input->size[1]; nPlanes = nInputPlanes*nOutputPlanes; THCudaTensor_resize3d(NULL,input, nPlanes, iH, iW); } else { nPlanes = input->size[0]; } float* input_data = (float*)THCudaTensor_data(NULL,input); assert(iH == iW); assert(nCropRows == nCropCols); dim3 threads(iH, nCropRows,4); //dim3 threads(iH,iW); dim3 blocks(nPlanes); hipLaunchKernelGGL(( batch_crop_kernel), dim3(blocks), dim3(threads), 0, 0, input_data, nCropRows, nCropCols, iH, iW, nPlanes); if (resize) { THCudaTensor_resize4d(NULL,input, nOutputPlanes, nInputPlanes, iH, iW); } CUDA_LOOK_FOR_ERROR(); return 0; }
70f8fb7004e039d0ed39da4278f452ec5fe3ac5c.cu
#include "luaT.h" #include "THC/THC.h" #include "arithmetic.h" /* __global__ void batch_crop_kernel(float* input, const int nCropRows, const int nCropCols, const int iH, const int iW, const int nPlanes){ const int plane = blockIdx.x; if (plane >= nPlanes) return; input += plane * iH * iW; const int tx = threadIdx.x; const int ty = threadIdx.y; if (ty < iH && (ty > iH-nCropRows-1 || ty < nCropRows)) { input[ty*iW + tx] = 0; } if (tx < iW && (tx > iW-nCropCols-1 || tx < nCropCols)) { input[ty*iW + tx] = 0; } } */ __global__ void batch_crop_kernel(float* input, const int nCropRows, const int nCropCols, const int iH, const int iW, const int nPlanes){ const int plane = blockIdx.x; if (plane >= nPlanes) return; input += plane * iH * iW; const int tx = threadIdx.x; const int ty = threadIdx.y; const int tz = threadIdx.z; // top if (tz == 0) { input[ty*iW + tx] = 0; } // bottom if (tz == 1) { input[(iH-ty-1)*iW + tx] = 0; } // left if (tz == 2) { input[tx*iW+ty] = 0; } // right if (tz == 3) { input[tx*iW + (iW-ty-1)] = 0; } /* if (ty < iH && (ty > iH-nCropRows-1 || ty < nCropRows)) { input[ty*iW + tx] = 0; } if (tx < iW && (tx > iW-nCropCols-1 || tx < nCropCols)) { input[ty*iW + tx] = 0; } */ } // we are assuming the input is real, not complex static int crop_zeroborders(lua_State *L) { THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); const int nCropRows = luaL_checknumber(L,2); const int nCropCols = luaL_checknumber(L,3); const int dim = input->nDimension; const int iH = input->size[dim-2]; const int iW = input->size[dim-1]; long nPlanes, nInputPlanes, nOutputPlanes; bool resize = false; if (dim == 4) { resize = true; nOutputPlanes = input->size[0]; nInputPlanes = input->size[1]; nPlanes = nInputPlanes*nOutputPlanes; THCudaTensor_resize3d(NULL,input, nPlanes, iH, iW); } else { nPlanes = input->size[0]; } float* input_data = (float*)THCudaTensor_data(NULL,input); assert(iH == iW); assert(nCropRows == nCropCols); dim3 threads(iH, nCropRows,4); //dim3 threads(iH,iW); dim3 blocks(nPlanes); batch_crop_kernel<<<blocks, threads>>>(input_data, nCropRows, nCropCols, iH, iW, nPlanes); if (resize) { THCudaTensor_resize4d(NULL,input, nOutputPlanes, nInputPlanes, iH, iW); } CUDA_LOOK_FOR_ERROR(); return 0; }
96a0891aa0ac56e054647766c509b589c36f15f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cfloat> #include "caffe2/core/context_gpu.h" #include "softmax_focal_loss_op.h" namespace caffe2 { namespace { __global__ void SpatialSoftmaxKernel(const int N, const int A, const int H, const int W, const float* Xdata, float* Pdata, const int num_classes) { CUDA_1D_KERNEL_LOOP(index, N * A * H * W) { int D = num_classes * A; int x = index % W; int y = (index / W) % H; int a = (index / (W * H)) % A; int i = index / W / H / A; // Subtract max on each cell for numerical reasons float max_val = -FLT_MAX; for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; max_val = max(max_val, Xdata[idx]); } // Exponentiate float expsum = 0.0f; for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; float expx = exp(Xdata[idx] - max_val); Pdata[idx] = expx; expsum += expx; } // Normalize for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; Pdata[idx] /= expsum; } } } __global__ void SoftmaxFocalLossKernel( const int N, const int A, const int H, const int W, const float* Pdata, const int* targets, float* losses, const float* weight_pos, const float gamma, const float alpha, const int num_classes) { CUDA_1D_KERNEL_LOOP(i, N * A * H * W) { int D = A * num_classes; int x = i % W; int y = (i / W) % H; int a = (i / (W * H)) % A; int n = i / (W * H * A); const int label = static_cast<int>(targets[i]); float Np = max(weight_pos[0], 1.0); float z = (label == 0) * (1 - alpha) / Np + (label >= 1) * alpha / Np; losses[i] = 0.0; if (label >= 0) { int offset = a * num_classes; int idx = n * (H * W * D) + (offset + label) * (H * W) + y * W + x; losses[i] = -(pow(1.0f - Pdata[idx], gamma) * log(max(Pdata[idx], FLT_MIN))) * z; } } } __global__ void SoftmaxFocalLossGradientWeightKernel( const int N, const int A, const int H, const int W, const float* Pdata, const int* targets, float* buff, const float* weight_pos, const float gamma, const float alpha, const int num_classes) { CUDA_1D_KERNEL_LOOP(i, N * A * H * W) { int D = A * num_classes; int x = i % W; int y = (i / W) % H; int a = (i / (W * H)) % A; int n = i / (W * H * A); const int label = static_cast<int>(targets[i]); float Np = max(weight_pos[0], 1.0); float z = (label == 0) * (1 - alpha) / Np + (label >= 1) * alpha / Np; buff[i] = 0.0; if (label >= 0) { int offset = a * num_classes; int idx = n * (H * W * D) + (offset + label) * (H * W) + y * W + x; float onemp = 1. 
- Pdata[idx]; float p = Pdata[idx]; buff[i] = (-pow(onemp, gamma) + gamma * pow(onemp, gamma - 1) * p * log(max(p, FLT_MIN))) * z; } } } __global__ void SoftmaxFocalLossGradientKernel( const int N, const int D, const int H, const int W, const float* Pdata, const int* targets, const float* buff, const float* d_loss_data, float* dX, const int num_classes) { CUDA_1D_KERNEL_LOOP(i, N * D * H * W) { int A = D / num_classes; int x = i % W; int y = (i / W) % H; int d = (i / (W * H)) % D; int a = d / num_classes; int c = d % num_classes; int n = i / (W * H * D); float d_loss = *d_loss_data; int ind = n * (H * W * A) + a * (H * W) + y * W + x; const int label = static_cast<int>(targets[ind]); float c1 = (label >= 0) * 1.0; float c2 = (label == c) * 1.0; dX[i] = 0.0; dX[i] = c1 * d_loss * buff[ind] * (c2 - Pdata[i]); } } } // namespace template <> bool SoftmaxFocalLossOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels auto& wp = Input(2); // num of foregound auto* avg_loss = Output(0); // average loss as output auto* P = Output(1); // softmax probability, going to be re-used in gradient int N = X.dim32(0); int D = X.dim32(1); int H = X.dim32(2); int W = X.dim32(3); int A = D / num_classes_; losses_.Resize(N * A * H * W); P->Resize(N * D * H * W); avg_loss->Resize(vector<int64_t>()); math::Set<float, CUDAContext>( avg_loss->size(), 0.f, avg_loss->mutable_data<float>(), &context_); math::Set<float, CUDAContext>( P->size(), 0.f, P->mutable_data<float>(), &context_); math::Set<float, CUDAContext>( losses_.size(), 0.f, losses_.mutable_data<float>(), &context_); DCHECK_EQ(X.ndim(), 4); const float* Xdata = X.data<float>(); const float* Wdata = wp.data<float>(); // Spatial Softmax Kernel hipLaunchKernelGGL(( SpatialSoftmaxKernel) , dim3(CAFFE_GET_BLOCKS(N * A * H * W)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, A, H, W, Xdata, P->mutable_data<float>(), num_classes_); // Compute loss for each x,y location const int* Tdata = T.data<int>(); hipLaunchKernelGGL(( SoftmaxFocalLossKernel) , dim3(CAFFE_GET_BLOCKS(N * A * H * W)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, A, H, W, P->data<float>(), Tdata, losses_.mutable_data<float>(), Wdata, gamma_, alpha_, num_classes_); // sum the losses float* avg_loss_data = avg_loss->mutable_data<float>(); math::Sum<float, CUDAContext>( losses_.size(), losses_.data<float>(), avg_loss_data, &context_); math::Scale<float, float, CUDAContext>( 1, scale_, avg_loss_data, avg_loss_data, &context_); return true; } template<> bool SoftmaxFocalLossGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Label auto& wp = Input(2); // num of foreground example auto& P = Input(3); // Softmax Probability auto& d_avg_loss = Input(4); auto* dX = Output(0); // gradient wrt logits int N = X.dim32(0); int D = X.dim32(1); int H = X.dim32(2); int W = X.dim32(3); int A = D / num_classes_; buff_.Resize(N * A * H * W); dX->ResizeLike(X); const float* Xdata = X.data<float>(); const int* Tdata = T.data<int>(); const float* Pdata = P.data<float>(); const float* Wdata = wp.data<float>(); // Compute the weight for gradients hipLaunchKernelGGL(( SoftmaxFocalLossGradientWeightKernel) , dim3(CAFFE_GET_BLOCKS(N * A * H * W)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, A, H, W, Pdata, Tdata, buff_.mutable_data<float>(), Wdata, gamma_, alpha_, num_classes_); // Compute the gradient with the weights const float* Bdata = buff_.data<float>(); hipLaunchKernelGGL(( 
SoftmaxFocalLossGradientKernel) , dim3(CAFFE_GET_BLOCKS(N * D * H * W)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, H, W, Pdata, Tdata, Bdata, d_avg_loss.data<float>(), dX->mutable_data<float>(), num_classes_); math::Scale<float, float, CUDAContext>( dX->size(), scale_, dX->data<float>(), dX->mutable_data<float>(), &context_); return true; } REGISTER_CUDA_OPERATOR(SoftmaxFocalLoss, SoftmaxFocalLossOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SoftmaxFocalLossGradient, SoftmaxFocalLossGradientOp<float, CUDAContext>); } // namespace caffe2
96a0891aa0ac56e054647766c509b589c36f15f5.cu
/** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cfloat> #include "caffe2/core/context_gpu.h" #include "softmax_focal_loss_op.h" namespace caffe2 { namespace { __global__ void SpatialSoftmaxKernel(const int N, const int A, const int H, const int W, const float* Xdata, float* Pdata, const int num_classes) { CUDA_1D_KERNEL_LOOP(index, N * A * H * W) { int D = num_classes * A; int x = index % W; int y = (index / W) % H; int a = (index / (W * H)) % A; int i = index / W / H / A; // Subtract max on each cell for numerical reasons float max_val = -FLT_MAX; for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; max_val = max(max_val, Xdata[idx]); } // Exponentiate float expsum = 0.0f; for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; float expx = exp(Xdata[idx] - max_val); Pdata[idx] = expx; expsum += expx; } // Normalize for(int c = a * num_classes; c < (a + 1) * num_classes; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; Pdata[idx] /= expsum; } } } __global__ void SoftmaxFocalLossKernel( const int N, const int A, const int H, const int W, const float* Pdata, const int* targets, float* losses, const float* weight_pos, const float gamma, const float alpha, const int num_classes) { CUDA_1D_KERNEL_LOOP(i, N * A * H * W) { int D = A * num_classes; int x = i % W; int y = (i / W) % H; int a = (i / (W * H)) % A; int n = i / (W * H * A); const int label = static_cast<int>(targets[i]); float Np = max(weight_pos[0], 1.0); float z = (label == 0) * (1 - alpha) / Np + (label >= 1) * alpha / Np; losses[i] = 0.0; if (label >= 0) { int offset = a * num_classes; int idx = n * (H * W * D) + (offset + label) * (H * W) + y * W + x; losses[i] = -(pow(1.0f - Pdata[idx], gamma) * log(max(Pdata[idx], FLT_MIN))) * z; } } } __global__ void SoftmaxFocalLossGradientWeightKernel( const int N, const int A, const int H, const int W, const float* Pdata, const int* targets, float* buff, const float* weight_pos, const float gamma, const float alpha, const int num_classes) { CUDA_1D_KERNEL_LOOP(i, N * A * H * W) { int D = A * num_classes; int x = i % W; int y = (i / W) % H; int a = (i / (W * H)) % A; int n = i / (W * H * A); const int label = static_cast<int>(targets[i]); float Np = max(weight_pos[0], 1.0); float z = (label == 0) * (1 - alpha) / Np + (label >= 1) * alpha / Np; buff[i] = 0.0; if (label >= 0) { int offset = a * num_classes; int idx = n * (H * W * D) + (offset + label) * (H * W) + y * W + x; float onemp = 1. 
- Pdata[idx]; float p = Pdata[idx]; buff[i] = (-pow(onemp, gamma) + gamma * pow(onemp, gamma - 1) * p * log(max(p, FLT_MIN))) * z; } } } __global__ void SoftmaxFocalLossGradientKernel( const int N, const int D, const int H, const int W, const float* Pdata, const int* targets, const float* buff, const float* d_loss_data, float* dX, const int num_classes) { CUDA_1D_KERNEL_LOOP(i, N * D * H * W) { int A = D / num_classes; int x = i % W; int y = (i / W) % H; int d = (i / (W * H)) % D; int a = d / num_classes; int c = d % num_classes; int n = i / (W * H * D); float d_loss = *d_loss_data; int ind = n * (H * W * A) + a * (H * W) + y * W + x; const int label = static_cast<int>(targets[ind]); float c1 = (label >= 0) * 1.0; float c2 = (label == c) * 1.0; dX[i] = 0.0; dX[i] = c1 * d_loss * buff[ind] * (c2 - Pdata[i]); } } } // namespace template <> bool SoftmaxFocalLossOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels auto& wp = Input(2); // num of foregound auto* avg_loss = Output(0); // average loss as output auto* P = Output(1); // softmax probability, going to be re-used in gradient int N = X.dim32(0); int D = X.dim32(1); int H = X.dim32(2); int W = X.dim32(3); int A = D / num_classes_; losses_.Resize(N * A * H * W); P->Resize(N * D * H * W); avg_loss->Resize(vector<int64_t>()); math::Set<float, CUDAContext>( avg_loss->size(), 0.f, avg_loss->mutable_data<float>(), &context_); math::Set<float, CUDAContext>( P->size(), 0.f, P->mutable_data<float>(), &context_); math::Set<float, CUDAContext>( losses_.size(), 0.f, losses_.mutable_data<float>(), &context_); DCHECK_EQ(X.ndim(), 4); const float* Xdata = X.data<float>(); const float* Wdata = wp.data<float>(); // Spatial Softmax Kernel SpatialSoftmaxKernel <<<CAFFE_GET_BLOCKS(N * A * H * W), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, A, H, W, Xdata, P->mutable_data<float>(), num_classes_); // Compute loss for each x,y location const int* Tdata = T.data<int>(); SoftmaxFocalLossKernel <<<CAFFE_GET_BLOCKS(N * A * H * W), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, A, H, W, P->data<float>(), Tdata, losses_.mutable_data<float>(), Wdata, gamma_, alpha_, num_classes_); // sum the losses float* avg_loss_data = avg_loss->mutable_data<float>(); math::Sum<float, CUDAContext>( losses_.size(), losses_.data<float>(), avg_loss_data, &context_); math::Scale<float, float, CUDAContext>( 1, scale_, avg_loss_data, avg_loss_data, &context_); return true; } template<> bool SoftmaxFocalLossGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Label auto& wp = Input(2); // num of foreground example auto& P = Input(3); // Softmax Probability auto& d_avg_loss = Input(4); auto* dX = Output(0); // gradient wrt logits int N = X.dim32(0); int D = X.dim32(1); int H = X.dim32(2); int W = X.dim32(3); int A = D / num_classes_; buff_.Resize(N * A * H * W); dX->ResizeLike(X); const float* Xdata = X.data<float>(); const int* Tdata = T.data<int>(); const float* Pdata = P.data<float>(); const float* Wdata = wp.data<float>(); // Compute the weight for gradients SoftmaxFocalLossGradientWeightKernel <<<CAFFE_GET_BLOCKS(N * A * H * W), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, A, H, W, Pdata, Tdata, buff_.mutable_data<float>(), Wdata, gamma_, alpha_, num_classes_); // Compute the gradient with the weights const float* Bdata = buff_.data<float>(); SoftmaxFocalLossGradientKernel <<<CAFFE_GET_BLOCKS(N * D * H * W), CAFFE_CUDA_NUM_THREADS, 0, 
context_.cuda_stream()>>>( N, D, H, W, Pdata, Tdata, Bdata, d_avg_loss.data<float>(), dX->mutable_data<float>(), num_classes_); math::Scale<float, float, CUDAContext>( dX->size(), scale_, dX->data<float>(), dX->mutable_data<float>(), &context_); return true; } REGISTER_CUDA_OPERATOR(SoftmaxFocalLoss, SoftmaxFocalLossOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SoftmaxFocalLossGradient, SoftmaxFocalLossGradientOp<float, CUDAContext>); } // namespace caffe2
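Written out, the quantity SoftmaxFocalLossKernel stores in losses and the per-cell weight SoftmaxFocalLossGradientWeightKernel stores in buff are the focal-loss terms. With \(p\) the softmax probability of the labelled class \(y_i\), \([\cdot]\) the indicator and \(N_{pos}=\max(\texttt{weight\_pos}[0],1)\):

\[
\ell_i = -\,z_i\,(1-p)^{\gamma}\log p,
\qquad
z_i = \frac{\alpha\,[y_i\ge 1] + (1-\alpha)\,[y_i=0]}{N_{pos}},
\]
\[
\texttt{buff}[i] = z_i\Big(\gamma\,(1-p)^{\gamma-1}\,p\,\log p \;-\; (1-p)^{\gamma}\Big) \;=\; p\,\frac{\partial \ell_i}{\partial p},
\]

so the final update in SoftmaxFocalLossGradientKernel, dX = d_loss * buff * ([c = y_i] - p_c), is the usual chain rule through the softmax, scaled afterwards by scale_.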
843a31f8ff0a4a0d0ee74a8f7aa7b494092c4da8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "rotate.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; hipMalloc(&a, XSIZE*YSIZE); float b = 2; float *c = NULL; hipMalloc(&c, XSIZE*YSIZE); int sx = 1; int sy = 1; int sz = 1; int dx = 1; int dy = 1; int dz = 1; int ux = 1; int uy = 1; int uz = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( rotate), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,sx,sy,sz,dx,dy,dz,ux,uy,uz); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( rotate), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,sx,sy,sz,dx,dy,dz,ux,uy,uz); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( rotate), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,sx,sy,sz,dx,dy,dz,ux,uy,uz); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
843a31f8ff0a4a0d0ee74a8f7aa7b494092c4da8.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "rotate.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); float b = 2; float *c = NULL; cudaMalloc(&c, XSIZE*YSIZE); int sx = 1; int sy = 1; int sz = 1; int dx = 1; int dy = 1; int dz = 1; int ux = 1; int uy = 1; int uz = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); rotate<<<gridBlock,threadBlock>>>(a,b,c,sx,sy,sz,dx,dy,dz,ux,uy,uz); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { rotate<<<gridBlock,threadBlock>>>(a,b,c,sx,sy,sz,dx,dy,dz,ux,uy,uz); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { rotate<<<gridBlock,threadBlock>>>(a,b,c,sx,sy,sz,dx,dy,dz,ux,uy,uz); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
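One caveat about the timing harness above: steady_clock::now() is read right after the last asynchronous kernel launch, with no synchronization, so the 1000-launch loop is timed without waiting for the kernels to finish. A CUDA-event variant (a sketch written against the same variables, not part of the original file) would measure GPU time instead:

    cudaEvent_t evStart, evStop;
    cudaEventCreate(&evStart);
    cudaEventCreate(&evStop);
    cudaEventRecord(evStart);
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        rotate<<<gridBlock, threadBlock>>>(a, b, c, sx, sy, sz, dx, dy, dz, ux, uy, uz);
    }
    cudaEventRecord(evStop);
    cudaEventSynchronize(evStop);                // wait for the last launch to finish
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, evStart, evStop);  // elapsed GPU time for all 1000 launches, in ms
    cudaEventDestroy(evStart);
    cudaEventDestroy(evStop);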
9d2c8a82310d13539e07d738bfa269f65c1503d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include "libraries/criterion/cuda/ViterbiPath.cuh" #include <cmath> #include <hipcub/hipcub.hpp> #include "libraries/common/Workspace.h" namespace { constexpr int kBlockSize = 32; template <class Float> struct WorkspacePtrs { explicit WorkspacePtrs(void* workspace, int B, int T, int N) { w2l::Workspace<> ws(workspace); ws.request(&alpha, B, T, N); ws.request(&beta, B, T, N); requiredSize = ws.requiredSize(); } Float* alpha; int* beta; size_t requiredSize; }; /* * B thread blocks * kBlockSize threads/block */ template <class Float> __global__ void computeInitial(int T, int N, const Float* input, WorkspacePtrs<Float> ws) { int b = blockIdx.x; for (int n = threadIdx.x; n < N; n += blockDim.x) { int k = b * T * N + n; ws.alpha[k] = input[k]; } } /* * B * N thread blocks (B if Final) * kBlockSize threads/block */ template <bool Final, class Float> __global__ void computeStep( int T, int N, int t, const Float* input, const Float* trans, int* _path, WorkspacePtrs<Float> ws) { int b, m; if (Final) { b = blockIdx.x; } else { b = blockIdx.x / N; m = blockIdx.x % N; } const auto* alphaPrev = &ws.alpha[b * T * N + (t - 1) * N]; const auto* inputCur = &input[b * T * N + t * N]; auto* alphaCur = &ws.alpha[b * T * N + t * N]; auto* betaCur = &ws.beta[b * T * N + t * N]; using BlockReduce = hipcub::BlockReduce<hipcub::KeyValuePair<int, Float>, kBlockSize>; __shared__ typename BlockReduce::TempStorage tempStorage; hipcub::KeyValuePair<int, Float> threadMax; threadMax.value = -INFINITY; for (int n = threadIdx.x; n < N; n += blockDim.x) { Float val = alphaPrev[n] + (Final ? 0 : trans[m * N + n]); if (val > threadMax.value) { threadMax.key = n; threadMax.value = val; } } auto result = BlockReduce(tempStorage).Reduce(threadMax, hipcub::ArgMax()); if (threadIdx.x == 0) { if (Final) { auto* path = &_path[b * T]; path[T - 1] = result.key; for (int s = T - 1; s > 0; --s) { path[s - 1] = ws.beta[b * T * N + s * N + path[s]]; } } else { alphaCur[m] = result.value + inputCur[m]; betaCur[m] = result.key; } } } } // namespace namespace w2l { namespace cuda { template <class Float> size_t ViterbiPath<Float>::getWorkspaceSize(int B, int T, int N) { return WorkspacePtrs<Float>(nullptr, B, T, N).requiredSize; } template <class Float> void ViterbiPath<Float>::compute( int B, int T, int N, const Float* input, const Float* trans, int* path, void* workspace, hipStream_t stream) { WorkspacePtrs<Float> ws(workspace, B, T, N); hipLaunchKernelGGL(( computeInitial), dim3(B), dim3(kBlockSize), 0, stream, T, N, input, ws); for (int t = 1; t < T; ++t) { hipLaunchKernelGGL(( computeStep<false>) , dim3(B * N), dim3(kBlockSize), 0, stream, T, N, t, input, trans, path, ws); } hipLaunchKernelGGL(( computeStep<true>) , dim3(B), dim3(kBlockSize), 0, stream, T, N, T, input, trans, path, ws); } template struct ViterbiPath<float>; template struct ViterbiPath<double>; } // namespace cuda } // namespace w2l
9d2c8a82310d13539e07d738bfa269f65c1503d5.cu
/** * Copyright (c) Facebook, Inc. and its affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include "libraries/criterion/cuda/ViterbiPath.cuh" #include <cmath> #include <cub/cub.cuh> #include "libraries/common/Workspace.h" namespace { constexpr int kBlockSize = 32; template <class Float> struct WorkspacePtrs { explicit WorkspacePtrs(void* workspace, int B, int T, int N) { w2l::Workspace<> ws(workspace); ws.request(&alpha, B, T, N); ws.request(&beta, B, T, N); requiredSize = ws.requiredSize(); } Float* alpha; int* beta; size_t requiredSize; }; /* * B thread blocks * kBlockSize threads/block */ template <class Float> __global__ void computeInitial(int T, int N, const Float* input, WorkspacePtrs<Float> ws) { int b = blockIdx.x; for (int n = threadIdx.x; n < N; n += blockDim.x) { int k = b * T * N + n; ws.alpha[k] = input[k]; } } /* * B * N thread blocks (B if Final) * kBlockSize threads/block */ template <bool Final, class Float> __global__ void computeStep( int T, int N, int t, const Float* input, const Float* trans, int* _path, WorkspacePtrs<Float> ws) { int b, m; if (Final) { b = blockIdx.x; } else { b = blockIdx.x / N; m = blockIdx.x % N; } const auto* alphaPrev = &ws.alpha[b * T * N + (t - 1) * N]; const auto* inputCur = &input[b * T * N + t * N]; auto* alphaCur = &ws.alpha[b * T * N + t * N]; auto* betaCur = &ws.beta[b * T * N + t * N]; using BlockReduce = cub::BlockReduce<cub::KeyValuePair<int, Float>, kBlockSize>; __shared__ typename BlockReduce::TempStorage tempStorage; cub::KeyValuePair<int, Float> threadMax; threadMax.value = -INFINITY; for (int n = threadIdx.x; n < N; n += blockDim.x) { Float val = alphaPrev[n] + (Final ? 0 : trans[m * N + n]); if (val > threadMax.value) { threadMax.key = n; threadMax.value = val; } } auto result = BlockReduce(tempStorage).Reduce(threadMax, cub::ArgMax()); if (threadIdx.x == 0) { if (Final) { auto* path = &_path[b * T]; path[T - 1] = result.key; for (int s = T - 1; s > 0; --s) { path[s - 1] = ws.beta[b * T * N + s * N + path[s]]; } } else { alphaCur[m] = result.value + inputCur[m]; betaCur[m] = result.key; } } } } // namespace namespace w2l { namespace cuda { template <class Float> size_t ViterbiPath<Float>::getWorkspaceSize(int B, int T, int N) { return WorkspacePtrs<Float>(nullptr, B, T, N).requiredSize; } template <class Float> void ViterbiPath<Float>::compute( int B, int T, int N, const Float* input, const Float* trans, int* path, void* workspace, cudaStream_t stream) { WorkspacePtrs<Float> ws(workspace, B, T, N); computeInitial<<<B, kBlockSize, 0, stream>>>(T, N, input, ws); for (int t = 1; t < T; ++t) { computeStep<false> <<<B * N, kBlockSize, 0, stream>>>(T, N, t, input, trans, path, ws); } computeStep<true> <<<B, kBlockSize, 0, stream>>>(T, N, T, input, trans, path, ws); } template struct ViterbiPath<float>; template struct ViterbiPath<double>; } // namespace cuda } // namespace w2l
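The three kernels above implement the standard Viterbi recursion, one (batch, state) pair per thread block. With \(x_t(m)\) the input score and \(\mathrm{trans}(m,n)\) the transition score:

\[
\alpha_0(m)=x_0(m),\qquad
\alpha_t(m)=x_t(m)+\max_n\big(\alpha_{t-1}(n)+\mathrm{trans}(m,n)\big),\qquad
\beta_t(m)=\arg\max_n\big(\alpha_{t-1}(n)+\mathrm{trans}(m,n)\big),
\]

and the Final pass takes \(\mathrm{path}[T-1]=\arg\max_n\alpha_{T-1}(n)\), then backtracks with \(\mathrm{path}[s-1]=\beta_s(\mathrm{path}[s])\).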
cd040a1714100ba234a14304699887b0a0d5d28f.hip
// !!! This is a file automatically generated by hipify!!! // https://github.com/vchizhov/smallpt-explained/blob/master/smallpt_explained.cpp // problem: stack overflow on gpu, too many recursion calls // solution: make radiance function iterative: have a queue where u expand rays (bcs we have reflection+refraction), have it with hardcoded limit // what is implemented: iterative version in case of refl+refrac, just have a probability it will take 1 or the other! // the more elegant code is to have different kernels, also more work per thread, etc. and so on, maybe later #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_math.h> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand.h> #include <stdio.h> #include "spheres_rays.cuh" #include "radiance.cuh" #define W 1024 #define H 768 #define samps 1024 // samples per subpixel #define BLOCKDIMX 32 #define BLOCKDIMY 2 #define XSTEP 1 //https://forums.developer.nvidia.com/t/hiprand-init-sequence-number-problem/56573 however xorwow is half the time of philox __global__ void smallpt_kernel(float3 *d_img, /*hiprandStatePhilox4_32_10_t*/ hiprandState_t *state, float3 cx, float3 cy, Ray cam){ #pragma unroll for(int step = 0; step < XSTEP; step++){ int idx = blockIdx.x * blockDim.x * XSTEP + threadIdx.x + step * BLOCKDIMX; int idy = blockIdx.y * blockDim.y + threadIdx.y; int id = idy * W + idx; if(idx >= W || idy >= H) return; int i = (H - idy - 1 ) * W + idx; // img comes reversed if(step == 0) { hiprand_init(id, 0, 0, &state[id]); } float3 r = make_float3(0); float3 acum = make_float3(0); #pragma unroll for(int sy = 0; sy < 2; sy++){ #pragma unroll for(int sx = 0; sx < 2; sx++, r = make_float3(0)){ #pragma unroll for(int s = 0; s < samps ; s++){// each sample is independent, can have another grid doing samps/2 and then atomic sum float r1 = 2 * hiprand_uniform (&state[id]); float dx = r1 < 1 ? sqrt(r1) - 1 : 1 - sqrt(2 - r1); float r2 = 2 * hiprand_uniform (&state[id]); float dy = r2 < 1 ? 
sqrt(r2) - 1 : 1 - sqrt(2 - r2); float3 d = cx * (((sx + .5 + dx) / 2 + idx) / W - .5) + cy * (((sy + .5 + dy) / 2 + idy) / H - .5) + cam.dir; r = r + radiance(Ray(cam.origin + d * 130, normalize(d)), state, id) * (1./samps); } acum = acum + clamp(r, 0, 1) * 0.25; } } d_img[i] = acum; } } int main(){ // https://en.wikipedia.org/wiki/Ray_tracing_(graphics)#Calculate_rays_for_rectangular_viewport Ray cam(make_float3(50, 52 , 295.6), normalize(make_float3(0, -0.042612, -1))); float aspectRatio = W/H; float vfov = 0.502643; float fovScale = 2 * tan(0.5*vfov); float3 cx = make_float3(aspectRatio, 0, 0) * fovScale; float3 cy = normalize(cross(cx, cam.dir)) * fovScale; float3 *h_img = (float3 *)malloc(sizeof(float3) * H * W); // cuda variables /*hiprandStatePhilox4_32_10_t*/ hiprandState_t *devStates; hipMalloc((void **)&devStates, sizeof(/*hiprandStatePhilox4_32_10_t*/ hiprandState_t) * W * H ); checkCudaErrors(hipGetLastError()); float3 *d_img; hipMalloc((void **)&d_img, sizeof(float3) * H * W); checkCudaErrors(hipGetLastError()); hipMemcpyToSymbol(spheres, &spheres_cpu, sizeof(spheres_cpu)); checkCudaErrors(hipGetLastError()); dim3 dimBlock(BLOCKDIMX, BLOCKDIMY); dim3 dimGrid((W + BLOCKDIMX * XSTEP - 1)/BLOCKDIMX/XSTEP, (H + BLOCKDIMY - 1)/BLOCKDIMY); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float elapsed_time; hipEventRecord(start, 0); hipLaunchKernelGGL(( smallpt_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_img, devStates, cx, cy, cam); checkCudaErrors(hipGetLastError()); hipDeviceSynchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); printf( "Ray Tracing time: %.8f ms\n", elapsed_time); checkCudaErrors(hipGetLastError()); hipMemcpy(h_img, d_img, H * W * sizeof(float3), hipMemcpyDeviceToHost); checkCudaErrors(hipGetLastError()); FILE *f = fopen("image.ppm", "w"); // Write image to PPM file. fprintf(f, "P3\n%d %d\n%d\n", W, H, 255); for (int i = 0; i < W*H; i++) fprintf(f, "%d %d %d ", toInt(h_img[i].x), toInt(h_img[i].y), toInt(h_img[i].z)); hipEventDestroy(start); hipEventDestroy(stop); free(h_img); hipFree(d_img); hipFree(devStates); return 0; }
cd040a1714100ba234a14304699887b0a0d5d28f.cu
// https://github.com/vchizhov/smallpt-explained/blob/master/smallpt_explained.cpp // problem: stack overflow on gpu, too many recursion calls // solution: make radiance function iterative: have a queue where u expand rays (bcs we have reflection+refraction), have it with hardcoded limit // what is implemented: iterative version in case of refl+refrac, just have a probability it will take 1 or the other! // the more elegant code is to have different kernels, also more work per thread, etc. and so on, maybe later #include <cuda_runtime.h> #include <helper_cuda.h> #include <helper_math.h> #include <curand_kernel.h> #include <curand.h> #include <stdio.h> #include "spheres_rays.cuh" #include "radiance.cuh" #define W 1024 #define H 768 #define samps 1024 // samples per subpixel #define BLOCKDIMX 32 #define BLOCKDIMY 2 #define XSTEP 1 //https://forums.developer.nvidia.com/t/curand-init-sequence-number-problem/56573 however xorwow is half the time of philox __global__ void smallpt_kernel(float3 *d_img, /*curandStatePhilox4_32_10_t*/ curandState_t *state, float3 cx, float3 cy, Ray cam){ #pragma unroll for(int step = 0; step < XSTEP; step++){ int idx = blockIdx.x * blockDim.x * XSTEP + threadIdx.x + step * BLOCKDIMX; int idy = blockIdx.y * blockDim.y + threadIdx.y; int id = idy * W + idx; if(idx >= W || idy >= H) return; int i = (H - idy - 1 ) * W + idx; // img comes reversed if(step == 0) { curand_init(id, 0, 0, &state[id]); } float3 r = make_float3(0); float3 acum = make_float3(0); #pragma unroll for(int sy = 0; sy < 2; sy++){ #pragma unroll for(int sx = 0; sx < 2; sx++, r = make_float3(0)){ #pragma unroll for(int s = 0; s < samps ; s++){// each sample is independent, can have another grid doing samps/2 and then atomic sum float r1 = 2 * curand_uniform (&state[id]); float dx = r1 < 1 ? sqrt(r1) - 1 : 1 - sqrt(2 - r1); float r2 = 2 * curand_uniform (&state[id]); float dy = r2 < 1 ? 
sqrt(r2) - 1 : 1 - sqrt(2 - r2); float3 d = cx * (((sx + .5 + dx) / 2 + idx) / W - .5) + cy * (((sy + .5 + dy) / 2 + idy) / H - .5) + cam.dir; r = r + radiance(Ray(cam.origin + d * 130, normalize(d)), state, id) * (1./samps); } acum = acum + clamp(r, 0, 1) * 0.25; } } d_img[i] = acum; } } int main(){ // https://en.wikipedia.org/wiki/Ray_tracing_(graphics)#Calculate_rays_for_rectangular_viewport Ray cam(make_float3(50, 52 , 295.6), normalize(make_float3(0, -0.042612, -1))); float aspectRatio = W/H; float vfov = 0.502643; float fovScale = 2 * tan(0.5*vfov); float3 cx = make_float3(aspectRatio, 0, 0) * fovScale; float3 cy = normalize(cross(cx, cam.dir)) * fovScale; float3 *h_img = (float3 *)malloc(sizeof(float3) * H * W); // cuda variables /*curandStatePhilox4_32_10_t*/ curandState_t *devStates; cudaMalloc((void **)&devStates, sizeof(/*curandStatePhilox4_32_10_t*/ curandState_t) * W * H ); checkCudaErrors(cudaGetLastError()); float3 *d_img; cudaMalloc((void **)&d_img, sizeof(float3) * H * W); checkCudaErrors(cudaGetLastError()); cudaMemcpyToSymbol(spheres, &spheres_cpu, sizeof(spheres_cpu)); checkCudaErrors(cudaGetLastError()); dim3 dimBlock(BLOCKDIMX, BLOCKDIMY); dim3 dimGrid((W + BLOCKDIMX * XSTEP - 1)/BLOCKDIMX/XSTEP, (H + BLOCKDIMY - 1)/BLOCKDIMY); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float elapsed_time; cudaEventRecord(start, 0); smallpt_kernel<<<dimGrid, dimBlock>>>(d_img, devStates, cx, cy, cam); checkCudaErrors(cudaGetLastError()); cudaDeviceSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); printf( "Ray Tracing time: %.8f ms\n", elapsed_time); checkCudaErrors(cudaGetLastError()); cudaMemcpy(h_img, d_img, H * W * sizeof(float3), cudaMemcpyDeviceToHost); checkCudaErrors(cudaGetLastError()); FILE *f = fopen("image.ppm", "w"); // Write image to PPM file. fprintf(f, "P3\n%d %d\n%d\n", W, H, 255); for (int i = 0; i < W*H; i++) fprintf(f, "%d %d %d ", toInt(h_img[i].x), toInt(h_img[i].y), toInt(h_img[i].z)); cudaEventDestroy(start); cudaEventDestroy(stop); free(h_img); cudaFree(d_img); cudaFree(devStates); return 0; }
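Two remarks on the camera setup in main above. First, W and H are integer macros, so float aspectRatio = W/H; performs integer division (1024/768 == 1) and the intended 4:3 ratio collapses to 1; (float)W / H would preserve it. Second, the primary ray through the jittered subpixel position \((\xi_x,\xi_y)\) of pixel \((x,y)\) is

\[
\mathbf{d}(x,y)=\mathbf{c}_x\Big(\frac{x+\xi_x}{W}-\tfrac12\Big)+\mathbf{c}_y\Big(\frac{y+\xi_y}{H}-\tfrac12\Big)+\mathbf{d}_{cam},
\qquad
\mathbf{c}_x=\big(\tfrac{W}{H}f,\,0,\,0\big),\quad
\mathbf{c}_y=\widehat{\mathbf{c}_x\times\mathbf{d}_{cam}}\,f,\quad
f=2\tan\tfrac{v_{fov}}{2},
\]

with the ray origin offset by \(130\,\mathbf{d}\) from the camera position, matching the kernel.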
235757ecb1e71d3c89a81c0e65a40fe34b44030c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/device/cuda_util.h" #include "oneflow/core/framework/framework.h" #include "oneflow/core/kernel/kernel_util.cuh" #include "oneflow/core/common/data_type.h" #include "oneflow/core/kernel/util/cuda_half_util.h" #include "oneflow/core/cuda/atomic.cuh" #include "oneflow/core/operator/operator_util.h" #include "oneflow/user/utils/pool_util.h" #include <algorithm> #include <cfloat> #include <cmath> namespace oneflow { namespace user_op { #define START_IND(a, b, c) (int)::floor((float)(a * c) / b) #define END_IND(a, b, c) (int)::ceil((float)((a + 1) * c) / b) #define START_IND_INT(a, b, c) ((a * c) / b) #define END_IND_INT(a, b, c) (((a + 1) * c + b - 1) / b) template<typename T> __global__ void InitPtr(int elements, T* ptr) { int gid = (blockDim.x * blockIdx.x) + threadIdx.x; int step = gridDim.x * blockDim.x; while (gid < elements) { ptr[gid] = static_cast<T>(0); gid += step; } } inline Shape GetShape5D(const Shape& shape, const std::string& data_format, int32_t dim) { FixedDimVector shape_3d = {GetInDim(shape, data_format, 0, dim), GetInDim(shape, data_format, 1, dim), GetInDim(shape, data_format, 2, dim)}; return Shape({shape.At(0), shape.At(1), shape_3d.at(0), shape_3d.at(1), shape_3d.at(2)}); } template<typename T> __global__ void AdaptiveAvgPoolCudaKernel(const T* input, T* output, int num_elems, int in_d, int in_h, int in_w, int out_d, int out_h, int out_w) { const int out_panel_size = out_d * out_h * out_w; const int in_panel_size = in_d * in_h * in_w; CUDA_1D_KERNEL_LOOP(idx, num_elems) { // TODO (Tianyu): Replace following codes with 'NdIndexOffsetHelper' int bc_idx = idx / out_panel_size; int out_d_idx = (idx % out_panel_size) / out_w / out_h; int out_h_idx = (idx % out_panel_size) % (out_h * out_w) / out_w; int out_w_idx = (idx % out_panel_size) % (out_h * out_w) % out_w; int in_start_d = START_IND(out_d_idx, out_d, in_d); int in_end_d = END_IND(out_d_idx, out_d, in_d); int k_d = in_end_d - in_start_d; int in_start_h = START_IND(out_h_idx, out_h, in_h); int in_end_h = END_IND(out_h_idx, out_h, in_h); int k_h = in_end_h - in_start_h; int in_start_w = START_IND(out_w_idx, out_w, in_w); int in_end_w = END_IND(out_w_idx, out_w, in_w); int k_w = in_end_w - in_start_w; const T* in_ptr = input + bc_idx * in_panel_size + in_start_d * in_h * in_w + in_start_h * in_w + in_start_w; T sum = static_cast<T>(0); for (int id = 0; id < k_d; ++id) { for (int ih = 0; ih < k_h; ++ih) { for (int iw = 0; iw < k_w; ++iw) { T val = *(in_ptr + ih * in_w + iw); sum += val; } } in_ptr += in_h * in_w; // next input depth } // Update output output[idx] = sum / k_d / k_h / k_w; } } template<typename T> __global__ void AdaptiveAvgPoolGradCudaKernel(T* input, const T* output, int num_elems, int in_d, int in_h, int in_w, int out_d, int out_h, int out_w) { const int out_panel_size = out_d * out_h * out_w; const int in_panel_size = in_d * 
in_h * in_w; CUDA_1D_KERNEL_LOOP(idx, num_elems) { // TODO (Tianyu): Replace following codes with 'NdIndexOffsetHelper' int bc_idx = idx / out_panel_size; int out_d_idx = (idx % out_panel_size) / out_w / out_h; int out_h_idx = (idx % out_panel_size) % (out_h * out_w) / out_w; int out_w_idx = (idx % out_panel_size) % (out_h * out_w) % out_w; int in_start_d = START_IND(out_d_idx, out_d, in_d); int in_end_d = END_IND(out_d_idx, out_d, in_d); int k_d = in_end_d - in_start_d; int in_start_h = START_IND(out_h_idx, out_h, in_h); int in_end_h = END_IND(out_h_idx, out_h, in_h); int k_h = in_end_h - in_start_h; int in_start_w = START_IND(out_w_idx, out_w, in_w); int in_end_w = END_IND(out_w_idx, out_w, in_w); int k_w = in_end_w - in_start_w; const T grad_delta = output[idx] / k_d / k_h / k_w; T* input_ptr = input + bc_idx * in_panel_size + in_start_d * in_h * in_w + in_start_h * in_w + in_start_w; for (int id = 0; id < k_d; ++id) { for (int ih = 0; ih < k_h; ++ih) { for (int iw = 0; iw < k_w; ++iw) { // TODO (Tianyu): Use 'atmoic::Add' when necessary cuda::atomic::Add(input_ptr + ih * in_w + iw, grad_delta); } } input_ptr += in_h * in_w; // next input depth } } } template<typename T> void AvgForwardCompute(KernelComputeContext* ctx, const int32_t& dim) { const Tensor* in_tensor = ctx->Tensor4ArgNameAndIndex("x", 0); Tensor* out_tensor = ctx->Tensor4ArgNameAndIndex("y", 0); const T* in_ptr = in_tensor->dptr<T>(); T* out_ptr = out_tensor->mut_dptr<T>(); const Shape& x_shape = ctx->TensorDesc4ArgNameAndIndex("x", 0)->shape(); const Shape& y_shape = ctx->TensorDesc4ArgNameAndIndex("y", 0)->shape(); // TODO (Tianyu): Support 'channels_last' std::string data_format = "channels_first"; const Shape& in = GetShape5D(x_shape, data_format, dim); const Shape& out = GetShape5D(y_shape, data_format, dim); const int out_elems = out_tensor->shape().elem_cnt(); RUN_CUDA_KERNEL((AdaptiveAvgPoolCudaKernel<T>), ctx->stream(), out_elems, in_ptr, out_ptr, out_elems, in.At(2), in.At(3), in.At(4), out.At(2), out.At(3), out.At(4)); } template<typename T> void AvgBackwardCompute(KernelComputeContext* ctx, const int32_t& dim) { const Tensor* out_tensor = ctx->Tensor4ArgNameAndIndex("dy", 0); Tensor* in_tensor = ctx->Tensor4ArgNameAndIndex("dx", 0); const T* out_ptr = out_tensor->dptr<T>(); T* in_ptr = in_tensor->mut_dptr<T>(); const Shape& dx_shape = ctx->TensorDesc4ArgNameAndIndex("dx", 0)->shape(); const Shape& dy_shape = ctx->TensorDesc4ArgNameAndIndex("dy", 0)->shape(); // TODO (Tianyu): Support 'channels_last' std::string data_format = "channels_first"; const Shape& in = GetShape5D(dx_shape, data_format, dim); const Shape& out = GetShape5D(dy_shape, data_format, dim); const int in_elems = in_tensor->shape().elem_cnt(); const int out_elems = out_tensor->shape().elem_cnt(); RUN_CUDA_KERNEL((InitPtr<T>), ctx->stream(), in_elems, in_elems, in_ptr); RUN_CUDA_KERNEL((AdaptiveAvgPoolGradCudaKernel<T>), ctx->stream(), out_elems, in_ptr, out_ptr, out_elems, in.At(2), in.At(3), in.At(4), out.At(2), out.At(3), out.At(4)); } template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool1dKernel final : public OpKernel { public: GpuAdaptiveAvgPool1dKernel() = default; ~GpuAdaptiveAvgPool1dKernel() = default; private: using user_op::OpKernel::Compute; void Compute(KernelComputeContext* ctx) const override { AvgForwardCompute<T>(ctx, 1); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool2dKernel final : public OpKernel { public: 
GpuAdaptiveAvgPool2dKernel() = default; ~GpuAdaptiveAvgPool2dKernel() = default; private: using user_op::OpKernel::Compute; void Compute(KernelComputeContext* ctx) const override { AvgForwardCompute<T>(ctx, 2); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool3dKernel final : public OpKernel { public: GpuAdaptiveAvgPool3dKernel() = default; ~GpuAdaptiveAvgPool3dKernel() = default; private: using user_op::OpKernel::Compute; void Compute(KernelComputeContext* ctx) const override { AvgForwardCompute<T>(ctx, 3); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool1dGradKernel final : public OpKernel { public: GpuAdaptiveAvgPool1dGradKernel() = default; ~GpuAdaptiveAvgPool1dGradKernel() = default; private: using user_op::OpKernel::Compute; void Compute(KernelComputeContext* ctx) const override { AvgBackwardCompute<T>(ctx, 1); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool2dGradKernel final : public OpKernel { public: GpuAdaptiveAvgPool2dGradKernel() = default; ~GpuAdaptiveAvgPool2dGradKernel() = default; private: using user_op::OpKernel::Compute; void Compute(KernelComputeContext* ctx) const override { AvgBackwardCompute<T>(ctx, 2); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool3dGradKernel final : public OpKernel { public: GpuAdaptiveAvgPool3dGradKernel() = default; ~GpuAdaptiveAvgPool3dGradKernel() = default; private: using user_op::OpKernel::Compute; void Compute(KernelComputeContext* ctx) const override { AvgBackwardCompute<T>(ctx, 3); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(device, dtype) \ REGISTER_USER_KERNEL("adaptive_avg_pool1d") \ .SetCreateFn<GpuAdaptiveAvgPool1dKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceType() == device) \ && (HobDataType("y", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("adaptive_avg_pool2d") \ .SetCreateFn<GpuAdaptiveAvgPool2dKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceType() == device) \ && (HobDataType("y", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("adaptive_avg_pool3d") \ .SetCreateFn<GpuAdaptiveAvgPool3dKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceType() == device) \ && (HobDataType("y", 0) == GetDataType<dtype>::value)); REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, float); REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, double); REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, int); #define REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(device, dtype) \ REGISTER_USER_KERNEL("adaptive_avg_pool1d_grad") \ .SetCreateFn<GpuAdaptiveAvgPool1dGradKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceType() == device) \ && (HobDataType("dx", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("adaptive_avg_pool2d_grad") \ .SetCreateFn<GpuAdaptiveAvgPool2dGradKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceType() == device) \ && (HobDataType("dx", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("adaptive_avg_pool3d_grad") \ .SetCreateFn<GpuAdaptiveAvgPool3dGradKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceType() == device) \ && (HobDataType("dx", 0) == GetDataType<dtype>::value)); 
REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, float); REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, double); REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, int); } // namespace user_op } // namespace oneflow
235757ecb1e71d3c89a81c0e65a40fe34b44030c.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/device/cuda_util.h" #include "oneflow/core/framework/framework.h" #include "oneflow/core/kernel/kernel_util.cuh" #include "oneflow/core/common/data_type.h" #include "oneflow/core/kernel/util/cuda_half_util.h" #include "oneflow/core/cuda/atomic.cuh" #include "oneflow/core/operator/operator_util.h" #include "oneflow/user/utils/pool_util.h" #include <algorithm> #include <cfloat> #include <cmath> namespace oneflow { namespace user_op { #define START_IND(a, b, c) (int)std::floor((float)(a * c) / b) #define END_IND(a, b, c) (int)std::ceil((float)((a + 1) * c) / b) #define START_IND_INT(a, b, c) ((a * c) / b) #define END_IND_INT(a, b, c) (((a + 1) * c + b - 1) / b) template<typename T> __global__ void InitPtr(int elements, T* ptr) { int gid = (blockDim.x * blockIdx.x) + threadIdx.x; int step = gridDim.x * blockDim.x; while (gid < elements) { ptr[gid] = static_cast<T>(0); gid += step; } } inline Shape GetShape5D(const Shape& shape, const std::string& data_format, int32_t dim) { FixedDimVector shape_3d = {GetInDim(shape, data_format, 0, dim), GetInDim(shape, data_format, 1, dim), GetInDim(shape, data_format, 2, dim)}; return Shape({shape.At(0), shape.At(1), shape_3d.at(0), shape_3d.at(1), shape_3d.at(2)}); } template<typename T> __global__ void AdaptiveAvgPoolCudaKernel(const T* input, T* output, int num_elems, int in_d, int in_h, int in_w, int out_d, int out_h, int out_w) { const int out_panel_size = out_d * out_h * out_w; const int in_panel_size = in_d * in_h * in_w; CUDA_1D_KERNEL_LOOP(idx, num_elems) { // TODO (Tianyu): Replace following codes with 'NdIndexOffsetHelper' int bc_idx = idx / out_panel_size; int out_d_idx = (idx % out_panel_size) / out_w / out_h; int out_h_idx = (idx % out_panel_size) % (out_h * out_w) / out_w; int out_w_idx = (idx % out_panel_size) % (out_h * out_w) % out_w; int in_start_d = START_IND(out_d_idx, out_d, in_d); int in_end_d = END_IND(out_d_idx, out_d, in_d); int k_d = in_end_d - in_start_d; int in_start_h = START_IND(out_h_idx, out_h, in_h); int in_end_h = END_IND(out_h_idx, out_h, in_h); int k_h = in_end_h - in_start_h; int in_start_w = START_IND(out_w_idx, out_w, in_w); int in_end_w = END_IND(out_w_idx, out_w, in_w); int k_w = in_end_w - in_start_w; const T* in_ptr = input + bc_idx * in_panel_size + in_start_d * in_h * in_w + in_start_h * in_w + in_start_w; T sum = static_cast<T>(0); for (int id = 0; id < k_d; ++id) { for (int ih = 0; ih < k_h; ++ih) { for (int iw = 0; iw < k_w; ++iw) { T val = *(in_ptr + ih * in_w + iw); sum += val; } } in_ptr += in_h * in_w; // next input depth } // Update output output[idx] = sum / k_d / k_h / k_w; } } template<typename T> __global__ void AdaptiveAvgPoolGradCudaKernel(T* input, const T* output, int num_elems, int in_d, int in_h, int in_w, int out_d, int out_h, int out_w) { const int out_panel_size = out_d * out_h * out_w; const int in_panel_size = in_d * in_h * in_w; CUDA_1D_KERNEL_LOOP(idx, num_elems) { // TODO (Tianyu): Replace 
following codes with 'NdIndexOffsetHelper' int bc_idx = idx / out_panel_size; int out_d_idx = (idx % out_panel_size) / out_w / out_h; int out_h_idx = (idx % out_panel_size) % (out_h * out_w) / out_w; int out_w_idx = (idx % out_panel_size) % (out_h * out_w) % out_w; int in_start_d = START_IND(out_d_idx, out_d, in_d); int in_end_d = END_IND(out_d_idx, out_d, in_d); int k_d = in_end_d - in_start_d; int in_start_h = START_IND(out_h_idx, out_h, in_h); int in_end_h = END_IND(out_h_idx, out_h, in_h); int k_h = in_end_h - in_start_h; int in_start_w = START_IND(out_w_idx, out_w, in_w); int in_end_w = END_IND(out_w_idx, out_w, in_w); int k_w = in_end_w - in_start_w; const T grad_delta = output[idx] / k_d / k_h / k_w; T* input_ptr = input + bc_idx * in_panel_size + in_start_d * in_h * in_w + in_start_h * in_w + in_start_w; for (int id = 0; id < k_d; ++id) { for (int ih = 0; ih < k_h; ++ih) { for (int iw = 0; iw < k_w; ++iw) { // TODO (Tianyu): Use 'atmoic::Add' when necessary cuda::atomic::Add(input_ptr + ih * in_w + iw, grad_delta); } } input_ptr += in_h * in_w; // next input depth } } } template<typename T> void AvgForwardCompute(KernelComputeContext* ctx, const int32_t& dim) { const Tensor* in_tensor = ctx->Tensor4ArgNameAndIndex("x", 0); Tensor* out_tensor = ctx->Tensor4ArgNameAndIndex("y", 0); const T* in_ptr = in_tensor->dptr<T>(); T* out_ptr = out_tensor->mut_dptr<T>(); const Shape& x_shape = ctx->TensorDesc4ArgNameAndIndex("x", 0)->shape(); const Shape& y_shape = ctx->TensorDesc4ArgNameAndIndex("y", 0)->shape(); // TODO (Tianyu): Support 'channels_last' std::string data_format = "channels_first"; const Shape& in = GetShape5D(x_shape, data_format, dim); const Shape& out = GetShape5D(y_shape, data_format, dim); const int out_elems = out_tensor->shape().elem_cnt(); RUN_CUDA_KERNEL((AdaptiveAvgPoolCudaKernel<T>), ctx->stream(), out_elems, in_ptr, out_ptr, out_elems, in.At(2), in.At(3), in.At(4), out.At(2), out.At(3), out.At(4)); } template<typename T> void AvgBackwardCompute(KernelComputeContext* ctx, const int32_t& dim) { const Tensor* out_tensor = ctx->Tensor4ArgNameAndIndex("dy", 0); Tensor* in_tensor = ctx->Tensor4ArgNameAndIndex("dx", 0); const T* out_ptr = out_tensor->dptr<T>(); T* in_ptr = in_tensor->mut_dptr<T>(); const Shape& dx_shape = ctx->TensorDesc4ArgNameAndIndex("dx", 0)->shape(); const Shape& dy_shape = ctx->TensorDesc4ArgNameAndIndex("dy", 0)->shape(); // TODO (Tianyu): Support 'channels_last' std::string data_format = "channels_first"; const Shape& in = GetShape5D(dx_shape, data_format, dim); const Shape& out = GetShape5D(dy_shape, data_format, dim); const int in_elems = in_tensor->shape().elem_cnt(); const int out_elems = out_tensor->shape().elem_cnt(); RUN_CUDA_KERNEL((InitPtr<T>), ctx->stream(), in_elems, in_elems, in_ptr); RUN_CUDA_KERNEL((AdaptiveAvgPoolGradCudaKernel<T>), ctx->stream(), out_elems, in_ptr, out_ptr, out_elems, in.At(2), in.At(3), in.At(4), out.At(2), out.At(3), out.At(4)); } template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool1dKernel final : public OpKernel { public: GpuAdaptiveAvgPool1dKernel() = default; ~GpuAdaptiveAvgPool1dKernel() = default; private: using user_op::OpKernel::Compute; void Compute(KernelComputeContext* ctx) const override { AvgForwardCompute<T>(ctx, 1); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool2dKernel final : public OpKernel { public: GpuAdaptiveAvgPool2dKernel() = default; ~GpuAdaptiveAvgPool2dKernel() = default; 
private: using user_op::OpKernel::Compute; void Compute(KernelComputeContext* ctx) const override { AvgForwardCompute<T>(ctx, 2); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool3dKernel final : public OpKernel { public: GpuAdaptiveAvgPool3dKernel() = default; ~GpuAdaptiveAvgPool3dKernel() = default; private: using user_op::OpKernel::Compute; void Compute(KernelComputeContext* ctx) const override { AvgForwardCompute<T>(ctx, 3); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool1dGradKernel final : public OpKernel { public: GpuAdaptiveAvgPool1dGradKernel() = default; ~GpuAdaptiveAvgPool1dGradKernel() = default; private: using user_op::OpKernel::Compute; void Compute(KernelComputeContext* ctx) const override { AvgBackwardCompute<T>(ctx, 1); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool2dGradKernel final : public OpKernel { public: GpuAdaptiveAvgPool2dGradKernel() = default; ~GpuAdaptiveAvgPool2dGradKernel() = default; private: using user_op::OpKernel::Compute; void Compute(KernelComputeContext* ctx) const override { AvgBackwardCompute<T>(ctx, 2); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; template<DeviceType device_type, typename T> class GpuAdaptiveAvgPool3dGradKernel final : public OpKernel { public: GpuAdaptiveAvgPool3dGradKernel() = default; ~GpuAdaptiveAvgPool3dGradKernel() = default; private: using user_op::OpKernel::Compute; void Compute(KernelComputeContext* ctx) const override { AvgBackwardCompute<T>(ctx, 3); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(device, dtype) \ REGISTER_USER_KERNEL("adaptive_avg_pool1d") \ .SetCreateFn<GpuAdaptiveAvgPool1dKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceType() == device) \ && (HobDataType("y", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("adaptive_avg_pool2d") \ .SetCreateFn<GpuAdaptiveAvgPool2dKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceType() == device) \ && (HobDataType("y", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("adaptive_avg_pool3d") \ .SetCreateFn<GpuAdaptiveAvgPool3dKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceType() == device) \ && (HobDataType("y", 0) == GetDataType<dtype>::value)); REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, float); REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, double); REGISTER_GPU_ADAPTIVE_AVGPOOL_KERNEL(DeviceType::kGPU, int); #define REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(device, dtype) \ REGISTER_USER_KERNEL("adaptive_avg_pool1d_grad") \ .SetCreateFn<GpuAdaptiveAvgPool1dGradKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceType() == device) \ && (HobDataType("dx", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("adaptive_avg_pool2d_grad") \ .SetCreateFn<GpuAdaptiveAvgPool2dGradKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceType() == device) \ && (HobDataType("dx", 0) == GetDataType<dtype>::value)); \ REGISTER_USER_KERNEL("adaptive_avg_pool3d_grad") \ .SetCreateFn<GpuAdaptiveAvgPool3dGradKernel<device, dtype>>() \ .SetIsMatchedHob((HobDeviceType() == device) \ && (HobDataType("dx", 0) == GetDataType<dtype>::value)); REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, float); 
REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, double); REGISTER_GPU_ADAPTIVE_AVGPOOL_BACKWARD_KERNEL(DeviceType::kGPU, int); } // namespace user_op } // namespace oneflow
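The adaptive average-pooling kernels above derive each output cell's input window from the START_IND/END_IND helpers and scale by the window volume k_d*k_h*k_w. Those helpers are defined earlier in the file and do not appear in this excerpt, so the sketch below assumes the conventional floor/ceil proportional mapping; the StartInd/EndInd names and the driver program are illustrative only, not part of the OneFlow source.

// Illustrative sketch: conventional adaptive-pooling index mapping (assumed
// definition of START_IND/END_IND, which are not shown in this excerpt).
#include <cmath>
#include <cstdio>

static int StartInd(int out_idx, int out_size, int in_size) {
  return (int)std::floor((float)(out_idx * in_size) / out_size);
}
static int EndInd(int out_idx, int out_size, int in_size) {
  return (int)std::ceil((float)((out_idx + 1) * in_size) / out_size);
}

int main() {
  // Mapping a length-10 input axis onto a length-4 output axis gives the
  // (possibly overlapping) windows [0,3), [2,5), [5,8), [7,10).
  const int in_size = 10, out_size = 4;
  for (int o = 0; o < out_size; ++o) {
    printf("out %d <- in [%d, %d)\n", o, StartInd(o, out_size, in_size), EndInd(o, out_size, in_size));
  }
  return 0;
}

The backward kernel reuses the same bounds, distributing output[idx] / (k_d*k_h*k_w) uniformly over that window with atomic adds.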
d99ca6f9ceec3b5cff7a22ca2fc9e76c472f9ac4.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "device_launch_parameters.h" #include "runtime/device.hpp" #include "runtime/error.hpp" #include "runtime/event.hpp" #include "runtime/kernel.cu" #include "runtime/memory.hpp" #include "runtime/version.hpp" #include "timer.hpp" #include <iostream> #include <functional> #include <thread> #define CU_SAFE_CALL(x) \ do { \ auto res = x; \ if(res != hipSuccess) { \ const char *name, *msg; \ hipGetErrorName(res, &name); \ hipGetErrorString(res, &msg); \ std::cerr << "CUDA driver call failed: " \ << msg << " (" << name \ << ")" << std::endl; \ exit(EXIT_FAILURE); \ } \ } while(0) #define CUDA_SAFE_CALL(x) \ do { \ auto res = x; \ if(res != hipSuccess) { \ std::cerr << "CUDA runtimecall failed: " \ << hipGetErrorString(res) \ << " (" \ << hipGetErrorName(res) \ << ")" << std::endl; \ exit(EXIT_FAILURE); \ } \ } while(0) __global__ void addKernel(int *c, const int *a, const int *b, const unsigned int elems, const unsigned int elems_per_thread) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; for (size_t i = idx * elems_per_thread; i < (idx + 1) * elems_per_thread; ++i) { if (i < elems) c[i] = a[i] + b[i]; } } void test_device(const cuda::device &dev, const cuda::host::pinned_buffer<int> &a, const cuda::host::pinned_buffer<int> &b, const unsigned int array_size) { constexpr unsigned int block_size = 1024; constexpr unsigned int grid_size = 1024; constexpr unsigned int elems_per_thread = 5; try { auto c = cuda::host::pinned_buffer<int>(array_size, false, false, true); dev.make_current(); auto kernel = cuda::launch::make_kernel(addKernel); //auto &s1 = cuda::stream::default_stream(); auto s1 = cuda::stream(dev, true); auto s2 = cuda::stream(dev, true); auto evt0 = cuda::event(dev); evt0.record(s1); cuda::host::input_buffer<int> dev_a1(array_size); cuda::host::input_buffer<int> dev_b1(array_size); cuda::host::output_buffer<int> dev_c1(array_size); cuda::host::input_buffer<int> dev_a2(array_size); cuda::host::input_buffer<int> dev_b2(array_size); cuda::host::output_buffer<int> dev_c2(array_size); auto evt1 = cuda::event(s1); s1.enqueue(dev_a1.copy_h2d_async(a)); s1.enqueue(dev_b1.copy_h2d_async(b)); auto evt2 = cuda::event(s1); s1.enqueue(kernel.stream_launch(dim3(grid_size, 1, 1), dim3(block_size, 1, 1), dev_c1.data(), dev_a1.data(), dev_b1.data(), array_size, elems_per_thread)); auto evt3 = cuda::event(s1); s1.enqueue(dev_c1.copy_d2h_async(c)); auto evt4 = cuda::event(s1); s2.enqueue(dev_a2.copy_h2d_async(a)); s2.enqueue(dev_b2.copy_h2d_async(b)); s2.enqueue(kernel.stream_launch(dim3(grid_size, 1, 1), dim3(block_size, 1, 1), dev_c2.data(), dev_a2.data(), dev_b2.data(), array_size, elems_per_thread)); s2.enqueue(dev_c2.copy_d2h_async(c)); // Synchronize (wait for everything to finish executing) dev.synchronize(); std::cout << '\t' << dev.get_properties().name << " (" << dev.used_mem() / 0x100000 << "MB / " << dev.total_mem() / 0x100000 << "MB): " << evt4.elapsed_time(evt0) << "ms (" << evt1.elapsed_time(evt0) << "ms|" << evt2.elapsed_time(evt1) << "ms|" << evt3.elapsed_time(evt2) << "ms|" << evt4.elapsed_time(evt3) << "ms)" << std::endl; } catch (const std::exception &e) { std::cout.flush(); std::cerr << e.what() << std::endl; } } void test_device_native(int devno, int *a, int *b, const unsigned int array_size) { const unsigned int block_size = 1024; const unsigned int grid_size = 1024; const unsigned int elems_per_thread = 5; int *c; CUDA_SAFE_CALL(hipHostMalloc(&c, array_size * sizeof(*c), 
hipHostMallocPortable | hipHostMallocMapped | hipHostMallocWriteCombined)); hipDevice_t dev; CU_SAFE_CALL(hipDeviceGet(&dev, devno)); hipCtx_t ctx; CU_SAFE_CALL(hipCtxCreate(&ctx, 0, dev)); hipStream_t s1 = {}, s2; CU_SAFE_CALL(hipStreamCreate__(&s2, hipStreamNonBlocking)); hipDeviceptr_t dev_a1, dev_b1, dev_c1; hipDeviceptr_t dev_a2, dev_b2, dev_c2; CU_SAFE_CALL(cuMemAlloc(&dev_a1, array_size * sizeof(*a))); CU_SAFE_CALL(cuMemAlloc(&dev_b1, array_size * sizeof(*b))); CU_SAFE_CALL(cuMemAlloc(&dev_c1, array_size * sizeof(*c))); CU_SAFE_CALL(cuMemAlloc(&dev_a2, array_size * sizeof(*a))); CU_SAFE_CALL(cuMemAlloc(&dev_b2, array_size * sizeof(*b))); CU_SAFE_CALL(cuMemAlloc(&dev_c2, array_size * sizeof(*c))); CU_SAFE_CALL(cuMemcpyHtoDAsync(dev_a1, a, array_size * sizeof(*a), s1)); CU_SAFE_CALL(cuMemcpyHtoDAsync(dev_b1, b, array_size * sizeof(*b), s1)); addKernel << <grid_size, block_size, 0, s1 >> > ((int *)dev_c1, (const int *)dev_a1, (const int *)dev_b1, array_size, elems_per_thread); CU_SAFE_CALL(cuMemcpyDtoHAsync(c, dev_c1, array_size * sizeof(*c), s1)); CU_SAFE_CALL(cuMemcpyHtoDAsync(dev_a2, a, array_size * sizeof(*a), s2)); CU_SAFE_CALL(cuMemcpyHtoDAsync(dev_b2, b, array_size * sizeof(*b), s2)); addKernel << <grid_size, block_size, 0, s2 >> > ((int *)dev_c2, (const int *)dev_a2, (const int *)dev_b2, array_size, elems_per_thread); CU_SAFE_CALL(cuMemcpyDtoHAsync(c, dev_c2, array_size * sizeof(*c), s2)); CU_SAFE_CALL(hipCtxSynchronize()); CU_SAFE_CALL(hipFree(dev_a1)); CU_SAFE_CALL(hipFree(dev_b1)); CU_SAFE_CALL(hipFree(dev_c1)); CU_SAFE_CALL(hipFree(dev_a2)); CU_SAFE_CALL(hipFree(dev_b2)); CU_SAFE_CALL(hipFree(dev_c2)); CUDA_SAFE_CALL(hipHostFree(c)); CU_SAFE_CALL(hipStreamDestroy(s2)); CU_SAFE_CALL(hipCtxDestroy(ctx)); } void test_threaded() { constexpr unsigned int array_size = 12000000; auto a = cuda::host::pinned_buffer<int>(array_size, true, false, false); auto b = cuda::host::pinned_buffer<int>(array_size, true, false, false); for (unsigned int i = 0; i < array_size; ++i) { a[i] = 2 * i; b[i] = array_size - i; } std::vector<std::thread> threads; for (int i = 0; i < cuda::device::count(); ++i) { threads.push_back(std::thread(test_device, std::cref(cuda::device::get(i)), std::cref(a), std::cref(b), array_size)); } for (auto &t : threads) { if (t.joinable()) { t.join(); } } } void test_native_threaded() { constexpr unsigned int array_size = 12000000; int *a, *b; CUDA_SAFE_CALL(hipHostMalloc(&a, array_size * sizeof(*a), hipHostMallocPortable)); CUDA_SAFE_CALL(hipHostMalloc(&b, array_size * sizeof(*b), hipHostMallocPortable)); for (unsigned int i = 0; i < array_size; ++i) { a[i] = 2 * i; b[i] = array_size - i; } int devcount; CU_SAFE_CALL(hipGetDeviceCount(&devcount)); for (int i = 0; i < devcount; ++i) { test_device_native(i, a, b, array_size); } CUDA_SAFE_CALL(hipHostFree(a)); CUDA_SAFE_CALL(hipHostFree(b)); } int main() { // Warm-up run CU_SAFE_CALL(hipInit(0)); //std::cout << "Warming up CUDA..." << std::endl; //test_native(); std::cout << "Running benchmark..." << std::endl; auto timer = cpputils::Timer<>(); test_native_threaded(); //std::cout << "Native completion time: " << timer.duration<>() << "ms" << std::endl; timer.reset(); test_threaded(); std::cout << "Library completion time: " << timer.duration<>() << "ms" << std::endl; return 0; }
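The hipified benchmark above keeps the driver-API allocations and copies (cuMemAlloc, cuMemcpyHtoDAsync) alongside HIP calls such as hipCtxCreate and hipStreamDestroy. For comparison, a minimal sketch of one stream's H2D copy / kernel launch / D2H copy sequence expressed with HIP runtime calls only is shown below; it reuses the addKernel defined above, the add_async helper name is hypothetical, and error checking is omitted for brevity.

// Illustrative sketch: runtime-API-only version of one stream's copy/launch/copy
// sequence (assumes pinned host buffers for truly asynchronous copies).
#include <hip/hip_runtime.h>

void add_async(const int *h_a, const int *h_b, int *h_c,
               unsigned int elems, unsigned int elems_per_thread) {
  const unsigned int block = 1024;
  const unsigned int grid = (elems + block * elems_per_thread - 1) / (block * elems_per_thread);
  int *d_a, *d_b, *d_c;
  hipStream_t s;
  hipStreamCreateWithFlags(&s, hipStreamNonBlocking);
  hipMalloc((void **)&d_a, elems * sizeof(int));
  hipMalloc((void **)&d_b, elems * sizeof(int));
  hipMalloc((void **)&d_c, elems * sizeof(int));
  hipMemcpyAsync(d_a, h_a, elems * sizeof(int), hipMemcpyHostToDevice, s);
  hipMemcpyAsync(d_b, h_b, elems * sizeof(int), hipMemcpyHostToDevice, s);
  hipLaunchKernelGGL(addKernel, dim3(grid), dim3(block), 0, s,
                     d_c, d_a, d_b, elems, elems_per_thread);
  hipMemcpyAsync(h_c, d_c, elems * sizeof(int), hipMemcpyDeviceToHost, s);
  hipStreamSynchronize(s);
  hipFree(d_a); hipFree(d_b); hipFree(d_c);
  hipStreamDestroy(s);
}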
d99ca6f9ceec3b5cff7a22ca2fc9e76c472f9ac4.cu
#include <cuda_runtime.h> #include "device_launch_parameters.h" #include "runtime/device.hpp" #include "runtime/error.hpp" #include "runtime/event.hpp" #include "runtime/kernel.cu" #include "runtime/memory.hpp" #include "runtime/version.hpp" #include "timer.hpp" #include <iostream> #include <functional> #include <thread> #define CU_SAFE_CALL(x) \ do { \ auto res = x; \ if(res != CUDA_SUCCESS) { \ const char *name, *msg; \ cuGetErrorName(res, &name); \ cuGetErrorString(res, &msg); \ std::cerr << "CUDA driver call failed: " \ << msg << " (" << name \ << ")" << std::endl; \ exit(EXIT_FAILURE); \ } \ } while(0) #define CUDA_SAFE_CALL(x) \ do { \ auto res = x; \ if(res != cudaSuccess) { \ std::cerr << "CUDA runtimecall failed: " \ << cudaGetErrorString(res) \ << " (" \ << cudaGetErrorName(res) \ << ")" << std::endl; \ exit(EXIT_FAILURE); \ } \ } while(0) __global__ void addKernel(int *c, const int *a, const int *b, const unsigned int elems, const unsigned int elems_per_thread) { unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; for (size_t i = idx * elems_per_thread; i < (idx + 1) * elems_per_thread; ++i) { if (i < elems) c[i] = a[i] + b[i]; } } void test_device(const cuda::device &dev, const cuda::host::pinned_buffer<int> &a, const cuda::host::pinned_buffer<int> &b, const unsigned int array_size) { constexpr unsigned int block_size = 1024; constexpr unsigned int grid_size = 1024; constexpr unsigned int elems_per_thread = 5; try { auto c = cuda::host::pinned_buffer<int>(array_size, false, false, true); dev.make_current(); auto kernel = cuda::launch::make_kernel(addKernel); //auto &s1 = cuda::stream::default_stream(); auto s1 = cuda::stream(dev, true); auto s2 = cuda::stream(dev, true); auto evt0 = cuda::event(dev); evt0.record(s1); cuda::host::input_buffer<int> dev_a1(array_size); cuda::host::input_buffer<int> dev_b1(array_size); cuda::host::output_buffer<int> dev_c1(array_size); cuda::host::input_buffer<int> dev_a2(array_size); cuda::host::input_buffer<int> dev_b2(array_size); cuda::host::output_buffer<int> dev_c2(array_size); auto evt1 = cuda::event(s1); s1.enqueue(dev_a1.copy_h2d_async(a)); s1.enqueue(dev_b1.copy_h2d_async(b)); auto evt2 = cuda::event(s1); s1.enqueue(kernel.stream_launch(dim3(grid_size, 1, 1), dim3(block_size, 1, 1), dev_c1.data(), dev_a1.data(), dev_b1.data(), array_size, elems_per_thread)); auto evt3 = cuda::event(s1); s1.enqueue(dev_c1.copy_d2h_async(c)); auto evt4 = cuda::event(s1); s2.enqueue(dev_a2.copy_h2d_async(a)); s2.enqueue(dev_b2.copy_h2d_async(b)); s2.enqueue(kernel.stream_launch(dim3(grid_size, 1, 1), dim3(block_size, 1, 1), dev_c2.data(), dev_a2.data(), dev_b2.data(), array_size, elems_per_thread)); s2.enqueue(dev_c2.copy_d2h_async(c)); // Synchronize (wait for everything to finish executing) dev.synchronize(); std::cout << '\t' << dev.get_properties().name << " (" << dev.used_mem() / 0x100000 << "MB / " << dev.total_mem() / 0x100000 << "MB): " << evt4.elapsed_time(evt0) << "ms (" << evt1.elapsed_time(evt0) << "ms|" << evt2.elapsed_time(evt1) << "ms|" << evt3.elapsed_time(evt2) << "ms|" << evt4.elapsed_time(evt3) << "ms)" << std::endl; } catch (const std::exception &e) { std::cout.flush(); std::cerr << e.what() << std::endl; } } void test_device_native(int devno, int *a, int *b, const unsigned int array_size) { const unsigned int block_size = 1024; const unsigned int grid_size = 1024; const unsigned int elems_per_thread = 5; int *c; CUDA_SAFE_CALL(cudaHostAlloc(&c, array_size * sizeof(*c), cudaHostAllocPortable | cudaHostAllocMapped | 
cudaHostAllocWriteCombined)); CUdevice dev; CU_SAFE_CALL(cuDeviceGet(&dev, devno)); CUcontext ctx; CU_SAFE_CALL(cuCtxCreate(&ctx, 0, dev)); CUstream s1 = {}, s2; CU_SAFE_CALL(cuStreamCreate(&s2, CU_STREAM_NON_BLOCKING)); CUdeviceptr dev_a1, dev_b1, dev_c1; CUdeviceptr dev_a2, dev_b2, dev_c2; CU_SAFE_CALL(cuMemAlloc(&dev_a1, array_size * sizeof(*a))); CU_SAFE_CALL(cuMemAlloc(&dev_b1, array_size * sizeof(*b))); CU_SAFE_CALL(cuMemAlloc(&dev_c1, array_size * sizeof(*c))); CU_SAFE_CALL(cuMemAlloc(&dev_a2, array_size * sizeof(*a))); CU_SAFE_CALL(cuMemAlloc(&dev_b2, array_size * sizeof(*b))); CU_SAFE_CALL(cuMemAlloc(&dev_c2, array_size * sizeof(*c))); CU_SAFE_CALL(cuMemcpyHtoDAsync(dev_a1, a, array_size * sizeof(*a), s1)); CU_SAFE_CALL(cuMemcpyHtoDAsync(dev_b1, b, array_size * sizeof(*b), s1)); addKernel << <grid_size, block_size, 0, s1 >> > ((int *)dev_c1, (const int *)dev_a1, (const int *)dev_b1, array_size, elems_per_thread); CU_SAFE_CALL(cuMemcpyDtoHAsync(c, dev_c1, array_size * sizeof(*c), s1)); CU_SAFE_CALL(cuMemcpyHtoDAsync(dev_a2, a, array_size * sizeof(*a), s2)); CU_SAFE_CALL(cuMemcpyHtoDAsync(dev_b2, b, array_size * sizeof(*b), s2)); addKernel << <grid_size, block_size, 0, s2 >> > ((int *)dev_c2, (const int *)dev_a2, (const int *)dev_b2, array_size, elems_per_thread); CU_SAFE_CALL(cuMemcpyDtoHAsync(c, dev_c2, array_size * sizeof(*c), s2)); CU_SAFE_CALL(cuCtxSynchronize()); CU_SAFE_CALL(cuMemFree(dev_a1)); CU_SAFE_CALL(cuMemFree(dev_b1)); CU_SAFE_CALL(cuMemFree(dev_c1)); CU_SAFE_CALL(cuMemFree(dev_a2)); CU_SAFE_CALL(cuMemFree(dev_b2)); CU_SAFE_CALL(cuMemFree(dev_c2)); CUDA_SAFE_CALL(cudaFreeHost(c)); CU_SAFE_CALL(cuStreamDestroy(s2)); CU_SAFE_CALL(cuCtxDestroy(ctx)); } void test_threaded() { constexpr unsigned int array_size = 12000000; auto a = cuda::host::pinned_buffer<int>(array_size, true, false, false); auto b = cuda::host::pinned_buffer<int>(array_size, true, false, false); for (unsigned int i = 0; i < array_size; ++i) { a[i] = 2 * i; b[i] = array_size - i; } std::vector<std::thread> threads; for (int i = 0; i < cuda::device::count(); ++i) { threads.push_back(std::thread(test_device, std::cref(cuda::device::get(i)), std::cref(a), std::cref(b), array_size)); } for (auto &t : threads) { if (t.joinable()) { t.join(); } } } void test_native_threaded() { constexpr unsigned int array_size = 12000000; int *a, *b; CUDA_SAFE_CALL(cudaHostAlloc(&a, array_size * sizeof(*a), cudaHostAllocPortable)); CUDA_SAFE_CALL(cudaHostAlloc(&b, array_size * sizeof(*b), cudaHostAllocPortable)); for (unsigned int i = 0; i < array_size; ++i) { a[i] = 2 * i; b[i] = array_size - i; } int devcount; CU_SAFE_CALL(cuDeviceGetCount(&devcount)); for (int i = 0; i < devcount; ++i) { test_device_native(i, a, b, array_size); } CUDA_SAFE_CALL(cudaFreeHost(a)); CUDA_SAFE_CALL(cudaFreeHost(b)); } int main() { // Warm-up run CU_SAFE_CALL(cuInit(0)); //std::cout << "Warming up CUDA..." << std::endl; //test_native(); std::cout << "Running benchmark..." << std::endl; auto timer = cpputils::Timer<>(); test_native_threaded(); //std::cout << "Native completion time: " << timer.duration<>() << "ms" << std::endl; timer.reset(); test_threaded(); std::cout << "Library completion time: " << timer.duration<>() << "ms" << std::endl; return 0; }
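Both versions of the benchmark launch addKernel with a fixed grid of 1024 blocks of 1024 threads and elems_per_thread = 5, which covers 1024*1024*5 = 5,242,880 elements; the arrays created in test_threaded and test_native_threaded hold 12,000,000 elements, so the in-kernel bound check leaves the tail of c unwritten (the benchmark only measures transfer and launch time and never validates c). A small host-side sketch of the ceil-division grid sizing that would cover the whole array follows; it is illustrative only and not part of the original file.

// Illustrative sketch: grid sizing that covers every element for the given
// block size and per-thread element count.
#include <cstdio>

int main() {
  const unsigned int elems = 12000000;
  const unsigned int block_size = 1024;
  const unsigned int elems_per_thread = 5;
  const unsigned int elems_per_block = block_size * elems_per_thread;             // 5120 elements per block
  const unsigned int grid_size = (elems + elems_per_block - 1) / elems_per_block; // ceil(12000000 / 5120) = 2344
  printf("grid_size = %u, covered = %u elements\n", grid_size, grid_size * elems_per_block);
  return 0;
}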
0724a646e944aeb5315df64fb26b61b6ca4f1a84.hip
// !!! This is a file automatically generated by hipify!!! /* Implements the sequential cuda vectors. */ #define PETSC_SKIP_SPINLOCK #define PETSC_SKIP_CXX_COMPLEX_FIX #include <petscconf.h> #include <petsc/private/vecimpl.h> #include <../src/vec/vec/impls/dvecimpl.h> #include <petsc/private/cudavecimpl.h> #include <hip/hip_runtime.h> #include <thrust/device_ptr.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/reduce.h> /* Allocates space for the vector array on the GPU if it does not exist. Does NOT change the PetscCUDAFlag for the vector Does NOT zero the CUDA array */ PetscErrorCode VecCUDAAllocateCheck(Vec v) { PetscErrorCode ierr; hipError_t err; Vec_CUDA *veccuda; PetscBool option_set; PetscFunctionBegin; if (!v->spptr) { PetscReal pinned_memory_min; ierr = PetscCalloc(sizeof(Vec_CUDA),&v->spptr);CHKERRQ(ierr); veccuda = (Vec_CUDA*)v->spptr; err = hipMalloc((void**)&veccuda->GPUarray_allocated,sizeof(PetscScalar)*((PetscBLASInt)v->map->n));CHKERRCUDA(err); veccuda->GPUarray = veccuda->GPUarray_allocated; if (v->offloadmask == PETSC_OFFLOAD_UNALLOCATED) { if (v->data && ((Vec_Seq*)v->data)->array) { v->offloadmask = PETSC_OFFLOAD_CPU; } else { v->offloadmask = PETSC_OFFLOAD_GPU; } } pinned_memory_min = 0; /* Need to parse command line for minimum size to use for pinned memory allocations on host here. Note: This same code duplicated in VecCreate_SeqCUDA_Private() and VecCreate_MPICUDA_Private(). Is there a good way to avoid this? */ ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)v),((PetscObject)v)->prefix,"VECCUDA Options","Vec");CHKERRQ(ierr); ierr = PetscOptionsReal("-vec_pinned_memory_min","Minimum size (in bytes) for an allocation to use pinned memory on host","VecSetPinnedMemoryMin",pinned_memory_min,&pinned_memory_min,&option_set);CHKERRQ(ierr); if (option_set) v->minimum_bytes_pinned_memory = pinned_memory_min; ierr = PetscOptionsEnd();CHKERRQ(ierr); } PetscFunctionReturn(0); } /* Copies a vector from the CPU to the GPU unless we already have an up-to-date copy on the GPU */ PetscErrorCode VecCUDACopyToGPU(Vec v) { PetscErrorCode ierr; hipError_t err; Vec_CUDA *veccuda; PetscScalar *varray; PetscFunctionBegin; PetscCheckTypeNames(v,VECSEQCUDA,VECMPICUDA); ierr = VecCUDAAllocateCheck(v);CHKERRQ(ierr); if (v->offloadmask == PETSC_OFFLOAD_CPU) { ierr = PetscLogEventBegin(VEC_CUDACopyToGPU,v,0,0,0);CHKERRQ(ierr); veccuda = (Vec_CUDA*)v->spptr; varray = veccuda->GPUarray; err = hipMemcpy(varray,((Vec_Seq*)v->data)->array,v->map->n*sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err); ierr = PetscLogCpuToGpu((v->map->n)*sizeof(PetscScalar));CHKERRQ(ierr); ierr = PetscLogEventEnd(VEC_CUDACopyToGPU,v,0,0,0);CHKERRQ(ierr); v->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } /* VecCUDACopyFromGPU - Copies a vector from the GPU to the CPU unless we already have an up-to-date copy on the CPU */ PetscErrorCode VecCUDACopyFromGPU(Vec v) { PetscErrorCode ierr; hipError_t err; Vec_CUDA *veccuda; PetscScalar *varray; PetscFunctionBegin; PetscCheckTypeNames(v,VECSEQCUDA,VECMPICUDA); ierr = VecCUDAAllocateCheckHost(v);CHKERRQ(ierr); if (v->offloadmask == PETSC_OFFLOAD_GPU) { ierr = PetscLogEventBegin(VEC_CUDACopyFromGPU,v,0,0,0);CHKERRQ(ierr); veccuda = (Vec_CUDA*)v->spptr; varray = veccuda->GPUarray; err = hipMemcpy(((Vec_Seq*)v->data)->array,varray,v->map->n*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(err); ierr = PetscLogGpuToCpu((v->map->n)*sizeof(PetscScalar));CHKERRQ(ierr); ierr = 
PetscLogEventEnd(VEC_CUDACopyFromGPU,v,0,0,0);CHKERRQ(ierr); v->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } /*MC VECSEQCUDA - VECSEQCUDA = "seqcuda" - The basic sequential vector, modified to use CUDA Options Database Keys: . -vec_type seqcuda - sets the vector type to VECSEQCUDA during a call to VecSetFromOptions() Level: beginner .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateSeqWithArray(), VECMPI, VecType, VecCreateMPI(), VecCreateSeq(), VecSetPinnedMemoryMin() M*/ PetscErrorCode VecAYPX_SeqCUDA(Vec yin,PetscScalar alpha,Vec xin) { const PetscScalar *xarray; PetscScalar *yarray; PetscErrorCode ierr; PetscBLASInt one = 1,bn = 0; PetscScalar sone = 1.0; hipblasHandle_t cublasv2handle; hipblasStatus_t cberr; hipError_t err; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArray(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (alpha == (PetscScalar)0.0) { err = hipMemcpy(yarray,xarray,bn*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(err); } else if (alpha == (PetscScalar)1.0) { cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr); ierr = PetscLogGpuFlops(1.0*yin->map->n);CHKERRQ(ierr); } else { cberr = cublasXscal(cublasv2handle,bn,&alpha,yarray,one);CHKERRCUBLAS(cberr); cberr = cublasXaxpy(cublasv2handle,bn,&sone,xarray,one,yarray,one);CHKERRCUBLAS(cberr); ierr = PetscLogGpuFlops(2.0*yin->map->n);CHKERRQ(ierr); } err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArray(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(sizeof(PetscScalar));CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecAXPY_SeqCUDA(Vec yin,PetscScalar alpha,Vec xin) { const PetscScalar *xarray; PetscScalar *yarray; PetscErrorCode ierr; PetscBLASInt one = 1,bn = 0; hipblasHandle_t cublasv2handle; hipblasStatus_t cberr; PetscBool xiscuda; hipError_t err; PetscFunctionBegin; if (alpha == (PetscScalar)0.0) PetscFunctionReturn(0); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscObjectTypeCompareAny((PetscObject)xin,&xiscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr); if (xiscuda) { ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArray(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr); err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArray(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*yin->map->n);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(sizeof(PetscScalar));CHKERRQ(ierr); } else { ierr = VecAXPY_Seq(yin,alpha,xin);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode VecPointwiseDivide_SeqCUDA(Vec win, Vec xin, Vec yin) { PetscInt n = xin->map->n; const PetscScalar *xarray=NULL,*yarray=NULL; PetscScalar *warray=NULL; thrust::device_ptr<const PetscScalar> xptr,yptr; thrust::device_ptr<PetscScalar> wptr; PetscErrorCode ierr; hipError_t err; PetscFunctionBegin; ierr = VecCUDAGetArrayWrite(win,&warray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = 
VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); try { wptr = thrust::device_pointer_cast(warray); xptr = thrust::device_pointer_cast(xarray); yptr = thrust::device_pointer_cast(yarray); thrust::transform(xptr,xptr+n,yptr,wptr,thrust::divides<PetscScalar>()); err = WaitForCUDA();CHKERRCUDA(err); } catch (char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex); } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(n);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(win,&warray);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecWAXPY_SeqCUDA(Vec win,PetscScalar alpha,Vec xin, Vec yin) { const PetscScalar *xarray=NULL,*yarray=NULL; PetscScalar *warray=NULL; PetscErrorCode ierr; PetscBLASInt one = 1,bn = 0; hipblasHandle_t cublasv2handle; hipblasStatus_t stat; hipError_t cerr; hipStream_t stream; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(win->map->n,&bn);CHKERRQ(ierr); if (alpha == (PetscScalar)0.0) { ierr = VecCopy_SeqCUDA(yin,win);CHKERRQ(ierr); } else { ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayWrite(win,&warray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); stat = hipblasGetStream(cublasv2handle,&stream);CHKERRCUBLAS(stat); cerr = hipMemcpyAsync(warray,yarray,win->map->n*sizeof(PetscScalar),hipMemcpyDeviceToDevice,stream);CHKERRCUDA(cerr); stat = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,warray,one);CHKERRCUBLAS(stat); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2*win->map->n);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(win,&warray);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(sizeof(PetscScalar));CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode VecMAXPY_SeqCUDA(Vec xin, PetscInt nv,const PetscScalar *alpha,Vec *y) { PetscErrorCode ierr; hipError_t err; PetscInt n = xin->map->n,j; PetscScalar *xarray; const PetscScalar *yarray; PetscBLASInt one = 1,bn = 0; hipblasHandle_t cublasv2handle; hipblasStatus_t cberr; PetscFunctionBegin; ierr = PetscLogGpuFlops(nv*2.0*n);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(nv*sizeof(PetscScalar));CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(n,&bn);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); ierr = VecCUDAGetArray(xin,&xarray);CHKERRQ(ierr); for (j=0; j<nv; j++) { ierr = VecCUDAGetArrayRead(y[j],&yarray);CHKERRQ(ierr); cberr = cublasXaxpy(cublasv2handle,bn,alpha+j,yarray,one,xarray,one);CHKERRCUBLAS(cberr); ierr = VecCUDARestoreArrayRead(y[j],&yarray);CHKERRQ(ierr); } err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArray(xin,&xarray);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecDot_SeqCUDA(Vec xin,Vec yin,PetscScalar *z) { const PetscScalar *xarray,*yarray; PetscErrorCode ierr; PetscBLASInt one = 1,bn = 0; hipblasHandle_t cublasv2handle; hipblasStatus_t cerr; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = 
VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr); /* arguments y, x are reversed because BLAS complex conjugates the first argument, PETSc the second */ ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cublasXdot(cublasv2handle,bn,yarray,one,xarray,one,z);CHKERRCUBLAS(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); if (xin->map->n >0) { ierr = PetscLogGpuFlops(2.0*xin->map->n-1);CHKERRQ(ierr); } ierr = PetscLogGpuToCpu(sizeof(PetscScalar));CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr); PetscFunctionReturn(0); } // // CUDA kernels for MDot to follow // // set work group size to be a power of 2 (128 is usually a good compromise between portability and speed) #define MDOT_WORKGROUP_SIZE 128 #define MDOT_WORKGROUP_NUM 128 #if !defined(PETSC_USE_COMPLEX) // M = 2: __global__ void VecMDot_SeqCUDA_kernel2(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[2*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[MDOT_WORKGROUP_SIZE]; } } // M = 3: __global__ void VecMDot_SeqCUDA_kernel3(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[3*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! 
group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; } } // M = 4: __global__ void VecMDot_SeqCUDA_kernel4(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[4*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; PetscScalar group_sum3 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; group_sum3 += entry_x * y3[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE]; } } // M = 8: __global__ void VecMDot_SeqCUDA_kernel8(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3, const PetscScalar *y4,const PetscScalar *y5,const PetscScalar *y6,const PetscScalar *y7, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[8*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 
1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; PetscScalar group_sum3 = 0; PetscScalar group_sum4 = 0; PetscScalar group_sum5 = 0; PetscScalar group_sum6 = 0; PetscScalar group_sum7 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; group_sum3 += entry_x * y3[i]; group_sum4 += entry_x * y4[i]; group_sum5 += entry_x * y5[i]; group_sum6 += entry_x * y6[i]; group_sum7 += entry_x * y7[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3; tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] = group_sum4; tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] = group_sum5; tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] = group_sum6; tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] = group_sum7; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 4 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 5 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 6 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 7 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 4 * gridDim.x] = tmp_buffer[4 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 5 * gridDim.x] = tmp_buffer[5 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 6 * gridDim.x] = tmp_buffer[6 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 7 * gridDim.x] = tmp_buffer[7 * MDOT_WORKGROUP_SIZE]; } } #endif /* !defined(PETSC_USE_COMPLEX) */ PetscErrorCode VecMDot_SeqCUDA(Vec xin,PetscInt nv,const Vec yin[],PetscScalar *z) { PetscErrorCode ierr; PetscInt i,n = xin->map->n,current_y_index = 0; const PetscScalar *xptr,*y0ptr,*y1ptr,*y2ptr,*y3ptr,*y4ptr,*y5ptr,*y6ptr,*y7ptr; #if !defined(PETSC_USE_COMPLEX) PetscInt nv1 = ((nv % 4) == 1) ? 
nv-1: nv,j; PetscScalar *group_results_gpu,group_results_cpu[nv1*MDOT_WORKGROUP_NUM]; hipError_t cuda_ierr; #endif PetscBLASInt one = 1,bn = 0; hipblasHandle_t cublasv2handle; hipblasStatus_t cberr; hipError_t err; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr); if (nv <= 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Number of vectors provided to VecMDot_SeqCUDA not positive."); /* Handle the case of local size zero first */ if (!xin->map->n) { for (i=0; i<nv; ++i) z[i] = 0; PetscFunctionReturn(0); } #if !defined(PETSC_USE_COMPLEX) // allocate scratchpad memory for the results of individual work groups: cuda_ierr = hipMalloc((void**)&group_results_gpu, nv1*sizeof(PetscScalar)*MDOT_WORKGROUP_NUM);CHKERRCUDA(cuda_ierr); #endif ierr = VecCUDAGetArrayRead(xin,&xptr);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); while (current_y_index < nv) { switch (nv - current_y_index) { case 7: case 6: case 5: case 4: ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y3ptr,one,xptr,one,&z[current_y_index+3]);CHKERRCUBLAS(cberr); #else hipLaunchKernelGGL(( VecMDot_SeqCUDA_kernel4), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,y2ptr,y3ptr,n,group_results_gpu+current_y_index*MDOT_WORKGROUP_NUM); #endif ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr); current_y_index += 4; break; case 3: ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr); #else hipLaunchKernelGGL(( VecMDot_SeqCUDA_kernel3), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,y2ptr,n,group_results_gpu+current_y_index*MDOT_WORKGROUP_NUM); #endif ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr); current_y_index += 3; break; case 2: ierr = VecCUDAGetArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) cberr = 
cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr); #else hipLaunchKernelGGL(( VecMDot_SeqCUDA_kernel2), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,n,group_results_gpu+current_y_index*MDOT_WORKGROUP_NUM); #endif ierr = VecCUDARestoreArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr); current_y_index += 2; break; case 1: ierr = VecCUDAGetArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr); cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr); ierr = VecCUDARestoreArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr); current_y_index += 1; break; default: // 8 or more vectors left ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+4],&y4ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+5],&y5ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+6],&y6ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+7],&y7ptr);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y3ptr,one,xptr,one,&z[current_y_index+3]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y4ptr,one,xptr,one,&z[current_y_index+4]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y5ptr,one,xptr,one,&z[current_y_index+5]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y6ptr,one,xptr,one,&z[current_y_index+6]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y7ptr,one,xptr,one,&z[current_y_index+7]);CHKERRCUBLAS(cberr); #else hipLaunchKernelGGL(( VecMDot_SeqCUDA_kernel8), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,y2ptr,y3ptr,y4ptr,y5ptr,y6ptr,y7ptr,n,group_results_gpu+current_y_index*MDOT_WORKGROUP_NUM); #endif ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+4],&y4ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+5],&y5ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+6],&y6ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+7],&y7ptr);CHKERRQ(ierr); current_y_index += 8; break; } } err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xptr);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) ierr = PetscLogGpuToCpu(nv*sizeof(PetscScalar));CHKERRQ(ierr); #else // copy results to CPU cuda_ierr = 
hipMemcpy(group_results_cpu,group_results_gpu,nv1*sizeof(PetscScalar)*MDOT_WORKGROUP_NUM,hipMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr); // sum group results into z for (j=0; j<nv1; ++j) { z[j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[j] += group_results_cpu[i]; } ierr = PetscLogFlops(nv1*MDOT_WORKGROUP_NUM);CHKERRQ(ierr); cuda_ierr = hipFree(group_results_gpu);CHKERRCUDA(cuda_ierr); ierr = PetscLogGpuToCpu(nv1*sizeof(PetscScalar)*MDOT_WORKGROUP_NUM);CHKERRQ(ierr); #endif ierr = PetscLogGpuFlops(PetscMax(nv*(2.0*n-1),0.0));CHKERRQ(ierr); PetscFunctionReturn(0); } #undef MDOT_WORKGROUP_SIZE #undef MDOT_WORKGROUP_NUM PetscErrorCode VecSet_SeqCUDA(Vec xin,PetscScalar alpha) { PetscInt n = xin->map->n; PetscScalar *xarray = NULL; thrust::device_ptr<PetscScalar> xptr; PetscErrorCode ierr; hipError_t err; PetscFunctionBegin; ierr = VecCUDAGetArrayWrite(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (alpha == (PetscScalar)0.0) { err = hipMemset(xarray,0,n*sizeof(PetscScalar));CHKERRCUDA(err); } else { try { xptr = thrust::device_pointer_cast(xarray); thrust::fill(xptr,xptr+n,alpha); } catch (char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex); } ierr = PetscLogCpuToGpu(sizeof(PetscScalar));CHKERRQ(ierr); } err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xin,&xarray);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecScale_SeqCUDA(Vec xin,PetscScalar alpha) { PetscScalar *xarray; PetscErrorCode ierr; PetscBLASInt one = 1,bn = 0; hipblasHandle_t cublasv2handle; hipblasStatus_t cberr; hipError_t err; PetscFunctionBegin; if (alpha == (PetscScalar)0.0) { ierr = VecSet_SeqCUDA(xin,alpha);CHKERRQ(ierr); err = WaitForCUDA();CHKERRCUDA(err); } else if (alpha != (PetscScalar)1.0) { ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr); ierr = VecCUDAGetArray(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cberr = cublasXscal(cublasv2handle,bn,&alpha,xarray,one);CHKERRCUBLAS(cberr); ierr = VecCUDARestoreArray(xin,&xarray);CHKERRQ(ierr); err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogCpuToGpu(sizeof(PetscScalar));CHKERRQ(ierr); ierr = PetscLogGpuFlops(xin->map->n);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode VecTDot_SeqCUDA(Vec xin,Vec yin,PetscScalar *z) { const PetscScalar *xarray,*yarray; PetscErrorCode ierr; PetscBLASInt one = 1,bn = 0; hipblasHandle_t cublasv2handle; hipblasStatus_t cerr; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cublasXdotu(cublasv2handle,bn,xarray,one,yarray,one,z);CHKERRCUBLAS(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); if (xin->map->n > 0) { ierr = PetscLogGpuFlops(2.0*xin->map->n-1);CHKERRQ(ierr); } ierr = PetscLogGpuToCpu(sizeof(PetscScalar));CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecCopy_SeqCUDA(Vec xin,Vec yin) { const PetscScalar *xarray; PetscScalar *yarray; PetscErrorCode ierr; hipError_t err; PetscFunctionBegin; if (xin != yin) { if (xin->offloadmask == PETSC_OFFLOAD_GPU) { PetscBool yiscuda; 
ierr = PetscObjectTypeCompareAny((PetscObject)yin,&yiscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); if (yiscuda) { ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr); } else { ierr = VecGetArrayWrite(yin,&yarray);CHKERRQ(ierr); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (yiscuda) { err = hipMemcpyAsync(yarray,xarray,yin->map->n*sizeof(PetscScalar),hipMemcpyDeviceToDevice,PetscDefaultCudaStream);CHKERRCUDA(err); } else { err = hipMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(err); } err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); if (yiscuda) { ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else { ierr = VecRestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } } else if (xin->offloadmask == PETSC_OFFLOAD_CPU) { /* copy in CPU if we are on the CPU */ ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr); } else if (xin->offloadmask == PETSC_OFFLOAD_BOTH) { /* if xin is valid in both places, see where yin is and copy there (because it's probably where we'll want to next use it) */ if (yin->offloadmask == PETSC_OFFLOAD_CPU) { /* copy in CPU */ ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr); } else if (yin->offloadmask == PETSC_OFFLOAD_GPU) { /* copy in GPU */ ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); err = hipMemcpyAsync(yarray,xarray,yin->map->n*sizeof(PetscScalar),hipMemcpyDeviceToDevice,PetscDefaultCudaStream);CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else if (yin->offloadmask == PETSC_OFFLOAD_BOTH) { /* xin and yin are both valid in both places (or yin was unallocated before the earlier call to allocatecheck default to copy in GPU (this is an arbitrary choice) */ ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); err = hipMemcpyAsync(yarray,xarray,yin->map->n*sizeof(PetscScalar),hipMemcpyDeviceToDevice,PetscDefaultCudaStream);CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else { ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr); } } } PetscFunctionReturn(0); } PetscErrorCode VecSwap_SeqCUDA(Vec xin,Vec yin) { PetscErrorCode ierr; PetscBLASInt one = 1,bn = 0; PetscScalar *xarray,*yarray; hipblasHandle_t cublasv2handle; hipblasStatus_t cberr; hipError_t err; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr); if (xin != yin) { ierr = VecCUDAGetArray(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArray(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cberr = cublasXswap(cublasv2handle,bn,xarray,one,yarray,one);CHKERRCUBLAS(cberr); err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArray(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArray(yin,&yarray);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode VecAXPBY_SeqCUDA(Vec yin,PetscScalar alpha,PetscScalar beta,Vec xin) { PetscErrorCode ierr; PetscScalar a = alpha,b = beta; const 
PetscScalar *xarray; PetscScalar *yarray; PetscBLASInt one = 1, bn = 0; hipblasHandle_t cublasv2handle; hipblasStatus_t cberr; hipError_t err; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr); if (a == (PetscScalar)0.0) { ierr = VecScale_SeqCUDA(yin,beta);CHKERRQ(ierr); } else if (b == (PetscScalar)1.0) { ierr = VecAXPY_SeqCUDA(yin,alpha,xin);CHKERRQ(ierr); } else if (a == (PetscScalar)1.0) { ierr = VecAYPX_SeqCUDA(yin,beta,xin);CHKERRQ(ierr); } else if (b == (PetscScalar)0.0) { ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArray(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); err = hipMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),hipMemcpyDeviceToDevice);CHKERRCUDA(err); cberr = cublasXscal(cublasv2handle,bn,&alpha,yarray,one);CHKERRCUBLAS(cberr); err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(xin->map->n);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(sizeof(PetscScalar));CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArray(yin,&yarray);CHKERRQ(ierr); } else { ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArray(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cberr = cublasXscal(cublasv2handle,bn,&beta,yarray,one);CHKERRCUBLAS(cberr); cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArray(yin,&yarray);CHKERRQ(ierr); err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(3.0*xin->map->n);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(2*sizeof(PetscScalar));CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode VecAXPBYPCZ_SeqCUDA(Vec zin,PetscScalar alpha,PetscScalar beta,PetscScalar gamma,Vec xin,Vec yin) { PetscErrorCode ierr; PetscInt n = zin->map->n; PetscFunctionBegin; if (gamma == (PetscScalar)1.0) { /* z = ax + b*y + z */ ierr = VecAXPY_SeqCUDA(zin,alpha,xin);CHKERRQ(ierr); ierr = VecAXPY_SeqCUDA(zin,beta,yin);CHKERRQ(ierr); ierr = PetscLogGpuFlops(4.0*n);CHKERRQ(ierr); } else { /* z = a*x + b*y + c*z */ ierr = VecScale_SeqCUDA(zin,gamma);CHKERRQ(ierr); ierr = VecAXPY_SeqCUDA(zin,alpha,xin);CHKERRQ(ierr); ierr = VecAXPY_SeqCUDA(zin,beta,yin);CHKERRQ(ierr); ierr = PetscLogGpuFlops(5.0*n);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode VecPointwiseMult_SeqCUDA(Vec win,Vec xin,Vec yin) { PetscInt n = win->map->n; const PetscScalar *xarray,*yarray; PetscScalar *warray; thrust::device_ptr<const PetscScalar> xptr,yptr; thrust::device_ptr<PetscScalar> wptr; PetscErrorCode ierr; hipError_t err; PetscFunctionBegin; ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayWrite(win,&warray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); try { wptr = thrust::device_pointer_cast(warray); xptr = thrust::device_pointer_cast(xarray); yptr = thrust::device_pointer_cast(yarray); thrust::transform(xptr,xptr+n,yptr,wptr,thrust::multiplies<PetscScalar>()); err = WaitForCUDA();CHKERRCUDA(err); } catch (char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex); } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = 
VecCUDARestoreArrayWrite(win,&warray);CHKERRQ(ierr); ierr = PetscLogGpuFlops(n);CHKERRQ(ierr); PetscFunctionReturn(0); } /* should do infinity norm in cuda */ PetscErrorCode VecNorm_SeqCUDA(Vec xin,NormType type,PetscReal *z) { PetscErrorCode ierr; PetscInt n = xin->map->n; PetscBLASInt one = 1, bn = 0; const PetscScalar *xarray; hipblasHandle_t cublasv2handle; hipblasStatus_t cberr; hipError_t err; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(n,&bn);CHKERRQ(ierr); if (type == NORM_2 || type == NORM_FROBENIUS) { ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cberr = cublasXnrm2(cublasv2handle,bn,xarray,one,z);CHKERRCUBLAS(cberr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuFlops(PetscMax(2.0*n-1,0.0));CHKERRQ(ierr); } else if (type == NORM_INFINITY) { int i; ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cberr = cublasIXamax(cublasv2handle,bn,xarray,one,&i);CHKERRCUBLAS(cberr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); if (bn) { PetscScalar zs; err = hipMemcpy(&zs,xarray+i-1,sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(err); *z = PetscAbsScalar(zs); } else *z = 0.0; ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); } else if (type == NORM_1) { ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cberr = cublasXasum(cublasv2handle,bn,xarray,one,z);CHKERRCUBLAS(cberr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(PetscMax(n-1.0,0.0));CHKERRQ(ierr); } else if (type == NORM_1_AND_2) { ierr = VecNorm_SeqCUDA(xin,NORM_1,z);CHKERRQ(ierr); ierr = VecNorm_SeqCUDA(xin,NORM_2,z+1);CHKERRQ(ierr); } ierr = PetscLogGpuToCpu(sizeof(PetscReal));CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecDotNorm2_SeqCUDA(Vec s, Vec t, PetscScalar *dp, PetscScalar *nm) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecDot_SeqCUDA(s,t,dp);CHKERRQ(ierr); ierr = VecDot_SeqCUDA(t,t,nm);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecDestroy_SeqCUDA(Vec v) { PetscErrorCode ierr; hipError_t cerr; Vec_CUDA *veccuda = (Vec_CUDA*)v->spptr; PetscFunctionBegin; if (v->spptr) { if (veccuda->GPUarray_allocated) { #if defined(PETSC_HAVE_NVSHMEM) if (veccuda->nvshmem) { ierr = PetscNvshmemFree(veccuda->GPUarray_allocated);CHKERRQ(ierr); veccuda->nvshmem = PETSC_FALSE; } else #endif {cerr = hipFree(veccuda->GPUarray_allocated);CHKERRCUDA(cerr);} veccuda->GPUarray_allocated = NULL; } if (veccuda->stream) { cerr = hipStreamDestroy(veccuda->stream);CHKERRCUDA(cerr); } } ierr = VecDestroy_SeqCUDA_Private(v);CHKERRQ(ierr); ierr = PetscFree(v->spptr);CHKERRQ(ierr); PetscFunctionReturn(0); } #if defined(PETSC_USE_COMPLEX) struct conjugate { __host__ __device__ PetscScalar operator()(PetscScalar x) { return PetscConj(x); } }; #endif PetscErrorCode VecConjugate_SeqCUDA(Vec xin) { #if defined(PETSC_USE_COMPLEX) PetscScalar *xarray; PetscErrorCode ierr; PetscInt n = xin->map->n; thrust::device_ptr<PetscScalar> xptr; hipError_t err; PetscFunctionBegin; ierr = VecCUDAGetArray(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); try { xptr = thrust::device_pointer_cast(xarray); thrust::transform(xptr,xptr+n,xptr,conjugate()); err = WaitForCUDA();CHKERRCUDA(err); } catch (char *ex) { 
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex); } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArray(xin,&xarray);CHKERRQ(ierr); #else PetscFunctionBegin; #endif PetscFunctionReturn(0); } PetscErrorCode VecGetLocalVector_SeqCUDA(Vec v,Vec w) { PetscErrorCode ierr; hipError_t err; PetscFunctionBegin; PetscValidHeaderSpecific(v,VEC_CLASSID,1); PetscValidHeaderSpecific(w,VEC_CLASSID,2); PetscCheckTypeName(w,VECSEQCUDA); PetscCheckTypeNames(v,VECSEQCUDA,VECMPICUDA); if (w->data) { if (((Vec_Seq*)w->data)->array_allocated) { if (w->pinned_memory) { ierr = PetscMallocSetCUDAHost();CHKERRQ(ierr); } ierr = PetscFree(((Vec_Seq*)w->data)->array_allocated);CHKERRQ(ierr); if (w->pinned_memory) { ierr = PetscMallocResetCUDAHost();CHKERRQ(ierr); w->pinned_memory = PETSC_FALSE; } } ((Vec_Seq*)w->data)->array = NULL; ((Vec_Seq*)w->data)->unplacedarray = NULL; } if (w->spptr) { PetscCheckTypeNames(v,VECSEQCUDA,VECMPICUDA); if (((Vec_CUDA*)w->spptr)->GPUarray) { err = hipFree(((Vec_CUDA*)w->spptr)->GPUarray);CHKERRCUDA(err); ((Vec_CUDA*)w->spptr)->GPUarray = NULL; } if (((Vec_CUDA*)w->spptr)->stream) { err = hipStreamDestroy(((Vec_CUDA*)w->spptr)->stream);CHKERRCUDA(err); } ierr = PetscFree(w->spptr);CHKERRQ(ierr); } if (v->petscnative) { ierr = PetscFree(w->data);CHKERRQ(ierr); w->data = v->data; w->offloadmask = v->offloadmask; w->pinned_memory = v->pinned_memory; w->spptr = v->spptr; ierr = PetscObjectStateIncrease((PetscObject)w);CHKERRQ(ierr); } else { ierr = VecGetArray(v,&((Vec_Seq*)w->data)->array);CHKERRQ(ierr); w->offloadmask = PETSC_OFFLOAD_CPU; ierr = VecCUDAAllocateCheck(w);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode VecRestoreLocalVector_SeqCUDA(Vec v,Vec w) { PetscErrorCode ierr; hipError_t err; PetscFunctionBegin; PetscValidHeaderSpecific(v,VEC_CLASSID,1); PetscValidHeaderSpecific(w,VEC_CLASSID,2); PetscCheckTypeNames(v,VECSEQCUDA,VECMPICUDA); PetscCheckTypeName(w,VECSEQCUDA); if (v->petscnative) { v->data = w->data; v->offloadmask = w->offloadmask; v->pinned_memory = w->pinned_memory; v->spptr = w->spptr; w->data = 0; w->offloadmask = PETSC_OFFLOAD_UNALLOCATED; w->spptr = 0; } else { ierr = VecRestoreArray(v,&((Vec_Seq*)w->data)->array);CHKERRQ(ierr); if ((Vec_CUDA*)w->spptr) { err = hipFree(((Vec_CUDA*)w->spptr)->GPUarray);CHKERRCUDA(err); ((Vec_CUDA*)w->spptr)->GPUarray = NULL; if (((Vec_CUDA*)v->spptr)->stream) { err = hipStreamDestroy(((Vec_CUDA*)w->spptr)->stream);CHKERRCUDA(err); } ierr = PetscFree(w->spptr);CHKERRQ(ierr); } } PetscFunctionReturn(0); } struct petscrealpart : public thrust::unary_function<PetscScalar,PetscReal> { __host__ __device__ PetscReal operator()(PetscScalar x) { return PetscRealPart(x); } }; struct petscrealparti : public thrust::unary_function<thrust::tuple<PetscScalar, PetscInt>,thrust::tuple<PetscReal, PetscInt>> { __host__ __device__ thrust::tuple<PetscReal, PetscInt> operator()(thrust::tuple<PetscScalar, PetscInt> x) { return thrust::make_tuple(PetscRealPart(x.get<0>()), x.get<1>()); } }; struct petscmax : public thrust::binary_function<PetscReal,PetscReal,PetscReal> { __host__ __device__ PetscReal operator()(PetscReal x, PetscReal y) { return x < y ? y : x; } }; struct petscmaxi : public thrust::binary_function<thrust::tuple<PetscReal, PetscInt>,thrust::tuple<PetscReal, PetscInt>,thrust::tuple<PetscReal, PetscInt>> { __host__ __device__ thrust::tuple<PetscReal, PetscInt> operator()(thrust::tuple<PetscReal, PetscInt> x, thrust::tuple<PetscReal, PetscInt> y) { return x.get<0>() < y.get<0>() ? 
thrust::make_tuple(y.get<0>(), y.get<1>()) : (x.get<0>() != y.get<0>() ? thrust::make_tuple(x.get<0>(), x.get<1>()) : (x.get<1>() < y.get<1>() ? thrust::make_tuple(x.get<0>(), x.get<1>()) : thrust::make_tuple(y.get<0>(), y.get<1>()))); } }; struct petscmin : public thrust::binary_function<PetscReal,PetscReal,PetscReal> { __host__ __device__ PetscReal operator()(PetscReal x, PetscReal y) { return x < y ? x : y; } }; struct petscmini : public thrust::binary_function<thrust::tuple<PetscReal, PetscInt>,thrust::tuple<PetscReal, PetscInt>,thrust::tuple<PetscReal, PetscInt>> { __host__ __device__ thrust::tuple<PetscReal, PetscInt> operator()(thrust::tuple<PetscReal, PetscInt> x, thrust::tuple<PetscReal, PetscInt> y) { return x.get<0>() > y.get<0>() ? thrust::make_tuple(y.get<0>(), y.get<1>()) : (x.get<0>() != y.get<0>() ? thrust::make_tuple(x.get<0>(), x.get<1>()) : (x.get<1>() < y.get<1>() ? thrust::make_tuple(x.get<0>(), x.get<1>()) : thrust::make_tuple(y.get<0>(), y.get<1>()))); } }; PetscErrorCode VecMax_SeqCUDA(Vec v, PetscInt *p, PetscReal *m) { PetscErrorCode ierr; PetscInt n = v->map->n; const PetscScalar *av; thrust::device_ptr<const PetscScalar> avpt; PetscFunctionBegin; PetscCheckTypeNames(v,VECSEQCUDA,VECMPICUDA); if (!n) { *m = PETSC_MIN_REAL; if (p) *p = -1; PetscFunctionReturn(0); } ierr = VecCUDAGetArrayRead(v,&av);CHKERRQ(ierr); avpt = thrust::device_pointer_cast(av); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (p) { thrust::tuple<PetscReal,PetscInt> res(PETSC_MIN_REAL,-1); auto zibit = thrust::make_zip_iterator(thrust::make_tuple(avpt,thrust::counting_iterator<PetscInt>(0))); try { #if defined(PETSC_USE_COMPLEX) res = thrust::transform_reduce(zibit,zibit+n,petscrealparti(),res,petscmaxi()); #else res = thrust::reduce(zibit,zibit+n,res,petscmaxi()); #endif } catch (char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex); } *m = res.get<0>(); *p = res.get<1>(); } else { try { #if defined(PETSC_USE_COMPLEX) *m = thrust::transform_reduce(avpt,avpt+n,petscrealpart(),PETSC_MIN_REAL,petscmax()); #else *m = thrust::reduce(avpt,avpt+n,PETSC_MIN_REAL,petscmax()); #endif } catch (char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex); } } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(v,&av);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecMin_SeqCUDA(Vec v, PetscInt *p, PetscReal *m) { PetscErrorCode ierr; PetscInt n = v->map->n; const PetscScalar *av; thrust::device_ptr<const PetscScalar> avpt; PetscFunctionBegin; PetscCheckTypeNames(v,VECSEQCUDA,VECMPICUDA); if (!n) { *m = PETSC_MAX_REAL; if (p) *p = -1; PetscFunctionReturn(0); } ierr = VecCUDAGetArrayRead(v,&av);CHKERRQ(ierr); avpt = thrust::device_pointer_cast(av); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (p) { thrust::tuple<PetscReal,PetscInt> res(PETSC_MAX_REAL,-1); auto zibit = thrust::make_zip_iterator(thrust::make_tuple(avpt,thrust::counting_iterator<PetscInt>(0))); try { #if defined(PETSC_USE_COMPLEX) res = thrust::transform_reduce(zibit,zibit+n,petscrealparti(),res,petscmini()); #else res = thrust::reduce(zibit,zibit+n,res,petscmini()); #endif } catch (char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex); } *m = res.get<0>(); *p = res.get<1>(); } else { try { #if defined(PETSC_USE_COMPLEX) *m = thrust::transform_reduce(avpt,avpt+n,petscrealpart(),PETSC_MAX_REAL,petscmin()); #else *m = thrust::reduce(avpt,avpt+n,PETSC_MAX_REAL,petscmin()); #endif } catch (char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: 
%s", ex); } } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(v,&av);CHKERRQ(ierr); PetscFunctionReturn(0); } #if defined(PETSC_HAVE_NVSHMEM) /* Free old CUDA array and re-allocate a new one from nvshmem symmetric heap. New array does not retain values in the old array. The offload mask is not changed. Note: the function is only meant to be used in MatAssemblyEnd_MPIAIJCUSPARSE. */ PetscErrorCode VecAllocateNVSHMEM_SeqCUDA(Vec v) { PetscErrorCode ierr; hipError_t cerr; Vec_CUDA *veccuda = (Vec_CUDA*)v->spptr; PetscInt n; PetscFunctionBegin; cerr = hipFree(veccuda->GPUarray_allocated);CHKERRCUDA(cerr); ierr = VecGetLocalSize(v,&n);CHKERRQ(ierr); ierr = MPIU_Allreduce(MPI_IN_PLACE,&n,1,MPIU_INT,MPI_MAX,PETSC_COMM_WORLD);CHKERRMPI(ierr); ierr = PetscNvshmemMalloc(n*sizeof(PetscScalar),(void**)&veccuda->GPUarray_allocated);CHKERRQ(ierr); veccuda->GPUarray = veccuda->GPUarray_allocated; veccuda->nvshmem = PETSC_TRUE; PetscFunctionReturn(0); } #endif
0724a646e944aeb5315df64fb26b61b6ca4f1a84.cu
/* Implements the sequential cuda vectors. */ #define PETSC_SKIP_SPINLOCK #define PETSC_SKIP_CXX_COMPLEX_FIX #include <petscconf.h> #include <petsc/private/vecimpl.h> #include <../src/vec/vec/impls/dvecimpl.h> #include <petsc/private/cudavecimpl.h> #include <cuda_runtime.h> #include <thrust/device_ptr.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/reduce.h> /* Allocates space for the vector array on the GPU if it does not exist. Does NOT change the PetscCUDAFlag for the vector Does NOT zero the CUDA array */ PetscErrorCode VecCUDAAllocateCheck(Vec v) { PetscErrorCode ierr; cudaError_t err; Vec_CUDA *veccuda; PetscBool option_set; PetscFunctionBegin; if (!v->spptr) { PetscReal pinned_memory_min; ierr = PetscCalloc(sizeof(Vec_CUDA),&v->spptr);CHKERRQ(ierr); veccuda = (Vec_CUDA*)v->spptr; err = cudaMalloc((void**)&veccuda->GPUarray_allocated,sizeof(PetscScalar)*((PetscBLASInt)v->map->n));CHKERRCUDA(err); veccuda->GPUarray = veccuda->GPUarray_allocated; if (v->offloadmask == PETSC_OFFLOAD_UNALLOCATED) { if (v->data && ((Vec_Seq*)v->data)->array) { v->offloadmask = PETSC_OFFLOAD_CPU; } else { v->offloadmask = PETSC_OFFLOAD_GPU; } } pinned_memory_min = 0; /* Need to parse command line for minimum size to use for pinned memory allocations on host here. Note: This same code duplicated in VecCreate_SeqCUDA_Private() and VecCreate_MPICUDA_Private(). Is there a good way to avoid this? */ ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)v),((PetscObject)v)->prefix,"VECCUDA Options","Vec");CHKERRQ(ierr); ierr = PetscOptionsReal("-vec_pinned_memory_min","Minimum size (in bytes) for an allocation to use pinned memory on host","VecSetPinnedMemoryMin",pinned_memory_min,&pinned_memory_min,&option_set);CHKERRQ(ierr); if (option_set) v->minimum_bytes_pinned_memory = pinned_memory_min; ierr = PetscOptionsEnd();CHKERRQ(ierr); } PetscFunctionReturn(0); } /* Copies a vector from the CPU to the GPU unless we already have an up-to-date copy on the GPU */ PetscErrorCode VecCUDACopyToGPU(Vec v) { PetscErrorCode ierr; cudaError_t err; Vec_CUDA *veccuda; PetscScalar *varray; PetscFunctionBegin; PetscCheckTypeNames(v,VECSEQCUDA,VECMPICUDA); ierr = VecCUDAAllocateCheck(v);CHKERRQ(ierr); if (v->offloadmask == PETSC_OFFLOAD_CPU) { ierr = PetscLogEventBegin(VEC_CUDACopyToGPU,v,0,0,0);CHKERRQ(ierr); veccuda = (Vec_CUDA*)v->spptr; varray = veccuda->GPUarray; err = cudaMemcpy(varray,((Vec_Seq*)v->data)->array,v->map->n*sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); ierr = PetscLogCpuToGpu((v->map->n)*sizeof(PetscScalar));CHKERRQ(ierr); ierr = PetscLogEventEnd(VEC_CUDACopyToGPU,v,0,0,0);CHKERRQ(ierr); v->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } /* VecCUDACopyFromGPU - Copies a vector from the GPU to the CPU unless we already have an up-to-date copy on the CPU */ PetscErrorCode VecCUDACopyFromGPU(Vec v) { PetscErrorCode ierr; cudaError_t err; Vec_CUDA *veccuda; PetscScalar *varray; PetscFunctionBegin; PetscCheckTypeNames(v,VECSEQCUDA,VECMPICUDA); ierr = VecCUDAAllocateCheckHost(v);CHKERRQ(ierr); if (v->offloadmask == PETSC_OFFLOAD_GPU) { ierr = PetscLogEventBegin(VEC_CUDACopyFromGPU,v,0,0,0);CHKERRQ(ierr); veccuda = (Vec_CUDA*)v->spptr; varray = veccuda->GPUarray; err = cudaMemcpy(((Vec_Seq*)v->data)->array,varray,v->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(err); ierr = PetscLogGpuToCpu((v->map->n)*sizeof(PetscScalar));CHKERRQ(ierr); ierr = PetscLogEventEnd(VEC_CUDACopyFromGPU,v,0,0,0);CHKERRQ(ierr); v->offloadmask = 
PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } /*MC VECSEQCUDA - VECSEQCUDA = "seqcuda" - The basic sequential vector, modified to use CUDA Options Database Keys: . -vec_type seqcuda - sets the vector type to VECSEQCUDA during a call to VecSetFromOptions() Level: beginner .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateSeqWithArray(), VECMPI, VecType, VecCreateMPI(), VecCreateSeq(), VecSetPinnedMemoryMin() M*/ PetscErrorCode VecAYPX_SeqCUDA(Vec yin,PetscScalar alpha,Vec xin) { const PetscScalar *xarray; PetscScalar *yarray; PetscErrorCode ierr; PetscBLASInt one = 1,bn = 0; PetscScalar sone = 1.0; cublasHandle_t cublasv2handle; cublasStatus_t cberr; cudaError_t err; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArray(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (alpha == (PetscScalar)0.0) { err = cudaMemcpy(yarray,xarray,bn*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(err); } else if (alpha == (PetscScalar)1.0) { cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr); ierr = PetscLogGpuFlops(1.0*yin->map->n);CHKERRQ(ierr); } else { cberr = cublasXscal(cublasv2handle,bn,&alpha,yarray,one);CHKERRCUBLAS(cberr); cberr = cublasXaxpy(cublasv2handle,bn,&sone,xarray,one,yarray,one);CHKERRCUBLAS(cberr); ierr = PetscLogGpuFlops(2.0*yin->map->n);CHKERRQ(ierr); } err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArray(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(sizeof(PetscScalar));CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecAXPY_SeqCUDA(Vec yin,PetscScalar alpha,Vec xin) { const PetscScalar *xarray; PetscScalar *yarray; PetscErrorCode ierr; PetscBLASInt one = 1,bn = 0; cublasHandle_t cublasv2handle; cublasStatus_t cberr; PetscBool xiscuda; cudaError_t err; PetscFunctionBegin; if (alpha == (PetscScalar)0.0) PetscFunctionReturn(0); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscObjectTypeCompareAny((PetscObject)xin,&xiscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr); if (xiscuda) { ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArray(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr); err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArray(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*yin->map->n);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(sizeof(PetscScalar));CHKERRQ(ierr); } else { ierr = VecAXPY_Seq(yin,alpha,xin);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode VecPointwiseDivide_SeqCUDA(Vec win, Vec xin, Vec yin) { PetscInt n = xin->map->n; const PetscScalar *xarray=NULL,*yarray=NULL; PetscScalar *warray=NULL; thrust::device_ptr<const PetscScalar> xptr,yptr; thrust::device_ptr<PetscScalar> wptr; PetscErrorCode ierr; cudaError_t err; PetscFunctionBegin; ierr = VecCUDAGetArrayWrite(win,&warray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); try { wptr = 
thrust::device_pointer_cast(warray); xptr = thrust::device_pointer_cast(xarray); yptr = thrust::device_pointer_cast(yarray); thrust::transform(xptr,xptr+n,yptr,wptr,thrust::divides<PetscScalar>()); err = WaitForCUDA();CHKERRCUDA(err); } catch (char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex); } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(n);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(win,&warray);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecWAXPY_SeqCUDA(Vec win,PetscScalar alpha,Vec xin, Vec yin) { const PetscScalar *xarray=NULL,*yarray=NULL; PetscScalar *warray=NULL; PetscErrorCode ierr; PetscBLASInt one = 1,bn = 0; cublasHandle_t cublasv2handle; cublasStatus_t stat; cudaError_t cerr; cudaStream_t stream; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(win->map->n,&bn);CHKERRQ(ierr); if (alpha == (PetscScalar)0.0) { ierr = VecCopy_SeqCUDA(yin,win);CHKERRQ(ierr); } else { ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayWrite(win,&warray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); stat = cublasGetStream(cublasv2handle,&stream);CHKERRCUBLAS(stat); cerr = cudaMemcpyAsync(warray,yarray,win->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToDevice,stream);CHKERRCUDA(cerr); stat = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,warray,one);CHKERRCUBLAS(stat); cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2*win->map->n);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(win,&warray);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(sizeof(PetscScalar));CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode VecMAXPY_SeqCUDA(Vec xin, PetscInt nv,const PetscScalar *alpha,Vec *y) { PetscErrorCode ierr; cudaError_t err; PetscInt n = xin->map->n,j; PetscScalar *xarray; const PetscScalar *yarray; PetscBLASInt one = 1,bn = 0; cublasHandle_t cublasv2handle; cublasStatus_t cberr; PetscFunctionBegin; ierr = PetscLogGpuFlops(nv*2.0*n);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(nv*sizeof(PetscScalar));CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(n,&bn);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); ierr = VecCUDAGetArray(xin,&xarray);CHKERRQ(ierr); for (j=0; j<nv; j++) { ierr = VecCUDAGetArrayRead(y[j],&yarray);CHKERRQ(ierr); cberr = cublasXaxpy(cublasv2handle,bn,alpha+j,yarray,one,xarray,one);CHKERRCUBLAS(cberr); ierr = VecCUDARestoreArrayRead(y[j],&yarray);CHKERRQ(ierr); } err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArray(xin,&xarray);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecDot_SeqCUDA(Vec xin,Vec yin,PetscScalar *z) { const PetscScalar *xarray,*yarray; PetscErrorCode ierr; PetscBLASInt one = 1,bn = 0; cublasHandle_t cublasv2handle; cublasStatus_t cerr; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr); /* arguments y, x are reversed because BLAS complex conjugates the first argument, 
PETSc the second */ ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cublasXdot(cublasv2handle,bn,yarray,one,xarray,one,z);CHKERRCUBLAS(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); if (xin->map->n >0) { ierr = PetscLogGpuFlops(2.0*xin->map->n-1);CHKERRQ(ierr); } ierr = PetscLogGpuToCpu(sizeof(PetscScalar));CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr); PetscFunctionReturn(0); } // // CUDA kernels for MDot to follow // // set work group size to be a power of 2 (128 is usually a good compromise between portability and speed) #define MDOT_WORKGROUP_SIZE 128 #define MDOT_WORKGROUP_NUM 128 #if !defined(PETSC_USE_COMPLEX) // M = 2: __global__ void VecMDot_SeqCUDA_kernel2(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[2*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[MDOT_WORKGROUP_SIZE]; } } // M = 3: __global__ void VecMDot_SeqCUDA_kernel3(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[3*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! 
group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; } } // M = 4: __global__ void VecMDot_SeqCUDA_kernel4(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[4*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; PetscScalar group_sum3 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; group_sum3 += entry_x * y3[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE]; } } // M = 8: __global__ void VecMDot_SeqCUDA_kernel8(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3, const PetscScalar *y4,const PetscScalar *y5,const PetscScalar *y6,const PetscScalar *y7, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[8*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 
1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = PetscMin((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; PetscScalar group_sum3 = 0; PetscScalar group_sum4 = 0; PetscScalar group_sum5 = 0; PetscScalar group_sum6 = 0; PetscScalar group_sum7 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; group_sum3 += entry_x * y3[i]; group_sum4 += entry_x * y4[i]; group_sum5 += entry_x * y5[i]; group_sum6 += entry_x * y6[i]; group_sum7 += entry_x * y7[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3; tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] = group_sum4; tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] = group_sum5; tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] = group_sum6; tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] = group_sum7; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 4 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 5 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 6 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 7 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 4 * gridDim.x] = tmp_buffer[4 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 5 * gridDim.x] = tmp_buffer[5 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 6 * gridDim.x] = tmp_buffer[6 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 7 * gridDim.x] = tmp_buffer[7 * MDOT_WORKGROUP_SIZE]; } } #endif /* !defined(PETSC_USE_COMPLEX) */ PetscErrorCode VecMDot_SeqCUDA(Vec xin,PetscInt nv,const Vec yin[],PetscScalar *z) { PetscErrorCode ierr; PetscInt i,n = xin->map->n,current_y_index = 0; const PetscScalar *xptr,*y0ptr,*y1ptr,*y2ptr,*y3ptr,*y4ptr,*y5ptr,*y6ptr,*y7ptr; #if !defined(PETSC_USE_COMPLEX) PetscInt nv1 = ((nv % 4) == 1) ? 
nv-1: nv,j; PetscScalar *group_results_gpu,group_results_cpu[nv1*MDOT_WORKGROUP_NUM]; cudaError_t cuda_ierr; #endif PetscBLASInt one = 1,bn = 0; cublasHandle_t cublasv2handle; cublasStatus_t cberr; cudaError_t err; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr); if (nv <= 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Number of vectors provided to VecMDot_SeqCUDA not positive."); /* Handle the case of local size zero first */ if (!xin->map->n) { for (i=0; i<nv; ++i) z[i] = 0; PetscFunctionReturn(0); } #if !defined(PETSC_USE_COMPLEX) // allocate scratchpad memory for the results of individual work groups: cuda_ierr = cudaMalloc((void**)&group_results_gpu, nv1*sizeof(PetscScalar)*MDOT_WORKGROUP_NUM);CHKERRCUDA(cuda_ierr); #endif ierr = VecCUDAGetArrayRead(xin,&xptr);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); while (current_y_index < nv) { switch (nv - current_y_index) { case 7: case 6: case 5: case 4: ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y3ptr,one,xptr,one,&z[current_y_index+3]);CHKERRCUBLAS(cberr); #else VecMDot_SeqCUDA_kernel4<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,y2ptr,y3ptr,n,group_results_gpu+current_y_index*MDOT_WORKGROUP_NUM); #endif ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr); current_y_index += 4; break; case 3: ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr); #else VecMDot_SeqCUDA_kernel3<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,y2ptr,n,group_results_gpu+current_y_index*MDOT_WORKGROUP_NUM); #endif ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr); current_y_index += 3; break; case 2: ierr = VecCUDAGetArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr); cberr = 
cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr); #else VecMDot_SeqCUDA_kernel2<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,n,group_results_gpu+current_y_index*MDOT_WORKGROUP_NUM); #endif ierr = VecCUDARestoreArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr); current_y_index += 2; break; case 1: ierr = VecCUDAGetArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr); cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr); ierr = VecCUDARestoreArrayRead(yin[current_y_index],&y0ptr);CHKERRQ(ierr); current_y_index += 1; break; default: // 8 or more vectors left ierr = VecCUDAGetArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+4],&y4ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+5],&y5ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+6],&y6ptr);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin[current_y_index+7],&y7ptr);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) cberr = cublasXdot(cublasv2handle,bn,y0ptr,one,xptr,one,&z[current_y_index]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y1ptr,one,xptr,one,&z[current_y_index+1]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y2ptr,one,xptr,one,&z[current_y_index+2]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y3ptr,one,xptr,one,&z[current_y_index+3]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y4ptr,one,xptr,one,&z[current_y_index+4]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y5ptr,one,xptr,one,&z[current_y_index+5]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y6ptr,one,xptr,one,&z[current_y_index+6]);CHKERRCUBLAS(cberr); cberr = cublasXdot(cublasv2handle,bn,y7ptr,one,xptr,one,&z[current_y_index+7]);CHKERRCUBLAS(cberr); #else VecMDot_SeqCUDA_kernel8<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,y2ptr,y3ptr,y4ptr,y5ptr,y6ptr,y7ptr,n,group_results_gpu+current_y_index*MDOT_WORKGROUP_NUM); #endif ierr = VecCUDARestoreArrayRead(yin[current_y_index ],&y0ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+1],&y1ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+2],&y2ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+3],&y3ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+4],&y4ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+5],&y5ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+6],&y6ptr);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin[current_y_index+7],&y7ptr);CHKERRQ(ierr); current_y_index += 8; break; } } err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xptr);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) ierr = PetscLogGpuToCpu(nv*sizeof(PetscScalar));CHKERRQ(ierr); #else // copy results to CPU cuda_ierr = cudaMemcpy(group_results_cpu,group_results_gpu,nv1*sizeof(PetscScalar)*MDOT_WORKGROUP_NUM,cudaMemcpyDeviceToHost);CHKERRCUDA(cuda_ierr); // sum group results into z for (j=0; j<nv1; ++j) { z[j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[j] += 
group_results_cpu[i]; } ierr = PetscLogFlops(nv1*MDOT_WORKGROUP_NUM);CHKERRQ(ierr); cuda_ierr = cudaFree(group_results_gpu);CHKERRCUDA(cuda_ierr); ierr = PetscLogGpuToCpu(nv1*sizeof(PetscScalar)*MDOT_WORKGROUP_NUM);CHKERRQ(ierr); #endif ierr = PetscLogGpuFlops(PetscMax(nv*(2.0*n-1),0.0));CHKERRQ(ierr); PetscFunctionReturn(0); } #undef MDOT_WORKGROUP_SIZE #undef MDOT_WORKGROUP_NUM PetscErrorCode VecSet_SeqCUDA(Vec xin,PetscScalar alpha) { PetscInt n = xin->map->n; PetscScalar *xarray = NULL; thrust::device_ptr<PetscScalar> xptr; PetscErrorCode ierr; cudaError_t err; PetscFunctionBegin; ierr = VecCUDAGetArrayWrite(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (alpha == (PetscScalar)0.0) { err = cudaMemset(xarray,0,n*sizeof(PetscScalar));CHKERRCUDA(err); } else { try { xptr = thrust::device_pointer_cast(xarray); thrust::fill(xptr,xptr+n,alpha); } catch (char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex); } ierr = PetscLogCpuToGpu(sizeof(PetscScalar));CHKERRQ(ierr); } err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xin,&xarray);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecScale_SeqCUDA(Vec xin,PetscScalar alpha) { PetscScalar *xarray; PetscErrorCode ierr; PetscBLASInt one = 1,bn = 0; cublasHandle_t cublasv2handle; cublasStatus_t cberr; cudaError_t err; PetscFunctionBegin; if (alpha == (PetscScalar)0.0) { ierr = VecSet_SeqCUDA(xin,alpha);CHKERRQ(ierr); err = WaitForCUDA();CHKERRCUDA(err); } else if (alpha != (PetscScalar)1.0) { ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr); ierr = VecCUDAGetArray(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cberr = cublasXscal(cublasv2handle,bn,&alpha,xarray,one);CHKERRCUBLAS(cberr); ierr = VecCUDARestoreArray(xin,&xarray);CHKERRQ(ierr); err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogCpuToGpu(sizeof(PetscScalar));CHKERRQ(ierr); ierr = PetscLogGpuFlops(xin->map->n);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode VecTDot_SeqCUDA(Vec xin,Vec yin,PetscScalar *z) { const PetscScalar *xarray,*yarray; PetscErrorCode ierr; PetscBLASInt one = 1,bn = 0; cublasHandle_t cublasv2handle; cublasStatus_t cerr; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cublasXdotu(cublasv2handle,bn,xarray,one,yarray,one,z);CHKERRCUBLAS(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); if (xin->map->n > 0) { ierr = PetscLogGpuFlops(2.0*xin->map->n-1);CHKERRQ(ierr); } ierr = PetscLogGpuToCpu(sizeof(PetscScalar));CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecCopy_SeqCUDA(Vec xin,Vec yin) { const PetscScalar *xarray; PetscScalar *yarray; PetscErrorCode ierr; cudaError_t err; PetscFunctionBegin; if (xin != yin) { if (xin->offloadmask == PETSC_OFFLOAD_GPU) { PetscBool yiscuda; ierr = PetscObjectTypeCompareAny((PetscObject)yin,&yiscuda,VECSEQCUDA,VECMPICUDA,"");CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); if (yiscuda) { ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr); } else { ierr = 
VecGetArrayWrite(yin,&yarray);CHKERRQ(ierr); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (yiscuda) { err = cudaMemcpyAsync(yarray,xarray,yin->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToDevice,PetscDefaultCudaStream);CHKERRCUDA(err); } else { err = cudaMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(err); } err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); if (yiscuda) { ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else { ierr = VecRestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } } else if (xin->offloadmask == PETSC_OFFLOAD_CPU) { /* copy in CPU if we are on the CPU */ ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr); } else if (xin->offloadmask == PETSC_OFFLOAD_BOTH) { /* if xin is valid in both places, see where yin is and copy there (because it's probably where we'll want to next use it) */ if (yin->offloadmask == PETSC_OFFLOAD_CPU) { /* copy in CPU */ ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr); } else if (yin->offloadmask == PETSC_OFFLOAD_GPU) { /* copy in GPU */ ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); err = cudaMemcpyAsync(yarray,xarray,yin->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToDevice,PetscDefaultCudaStream);CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else if (yin->offloadmask == PETSC_OFFLOAD_BOTH) { /* xin and yin are both valid in both places (or yin was unallocated before the earlier call to allocatecheck default to copy in GPU (this is an arbitrary choice) */ ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayWrite(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); err = cudaMemcpyAsync(yarray,xarray,yin->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToDevice,PetscDefaultCudaStream);CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else { ierr = VecCopy_SeqCUDA_Private(xin,yin);CHKERRQ(ierr); } } } PetscFunctionReturn(0); } PetscErrorCode VecSwap_SeqCUDA(Vec xin,Vec yin) { PetscErrorCode ierr; PetscBLASInt one = 1,bn = 0; PetscScalar *xarray,*yarray; cublasHandle_t cublasv2handle; cublasStatus_t cberr; cudaError_t err; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr); if (xin != yin) { ierr = VecCUDAGetArray(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArray(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cberr = cublasXswap(cublasv2handle,bn,xarray,one,yarray,one);CHKERRCUBLAS(cberr); err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArray(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArray(yin,&yarray);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode VecAXPBY_SeqCUDA(Vec yin,PetscScalar alpha,PetscScalar beta,Vec xin) { PetscErrorCode ierr; PetscScalar a = alpha,b = beta; const PetscScalar *xarray; PetscScalar *yarray; PetscBLASInt one = 1, bn = 0; cublasHandle_t cublasv2handle; cublasStatus_t cberr; cudaError_t err; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = 
PetscBLASIntCast(yin->map->n,&bn);CHKERRQ(ierr); if (a == (PetscScalar)0.0) { ierr = VecScale_SeqCUDA(yin,beta);CHKERRQ(ierr); } else if (b == (PetscScalar)1.0) { ierr = VecAXPY_SeqCUDA(yin,alpha,xin);CHKERRQ(ierr); } else if (a == (PetscScalar)1.0) { ierr = VecAYPX_SeqCUDA(yin,beta,xin);CHKERRQ(ierr); } else if (b == (PetscScalar)0.0) { ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArray(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); err = cudaMemcpy(yarray,xarray,yin->map->n*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);CHKERRCUDA(err); cberr = cublasXscal(cublasv2handle,bn,&alpha,yarray,one);CHKERRCUBLAS(cberr); err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(xin->map->n);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(sizeof(PetscScalar));CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArray(yin,&yarray);CHKERRQ(ierr); } else { ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArray(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cberr = cublasXscal(cublasv2handle,bn,&beta,yarray,one);CHKERRCUBLAS(cberr); cberr = cublasXaxpy(cublasv2handle,bn,&alpha,xarray,one,yarray,one);CHKERRCUBLAS(cberr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArray(yin,&yarray);CHKERRQ(ierr); err = WaitForCUDA();CHKERRCUDA(err); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(3.0*xin->map->n);CHKERRQ(ierr); ierr = PetscLogCpuToGpu(2*sizeof(PetscScalar));CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode VecAXPBYPCZ_SeqCUDA(Vec zin,PetscScalar alpha,PetscScalar beta,PetscScalar gamma,Vec xin,Vec yin) { PetscErrorCode ierr; PetscInt n = zin->map->n; PetscFunctionBegin; if (gamma == (PetscScalar)1.0) { /* z = ax + b*y + z */ ierr = VecAXPY_SeqCUDA(zin,alpha,xin);CHKERRQ(ierr); ierr = VecAXPY_SeqCUDA(zin,beta,yin);CHKERRQ(ierr); ierr = PetscLogGpuFlops(4.0*n);CHKERRQ(ierr); } else { /* z = a*x + b*y + c*z */ ierr = VecScale_SeqCUDA(zin,gamma);CHKERRQ(ierr); ierr = VecAXPY_SeqCUDA(zin,alpha,xin);CHKERRQ(ierr); ierr = VecAXPY_SeqCUDA(zin,beta,yin);CHKERRQ(ierr); ierr = PetscLogGpuFlops(5.0*n);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode VecPointwiseMult_SeqCUDA(Vec win,Vec xin,Vec yin) { PetscInt n = win->map->n; const PetscScalar *xarray,*yarray; PetscScalar *warray; thrust::device_ptr<const PetscScalar> xptr,yptr; thrust::device_ptr<PetscScalar> wptr; PetscErrorCode ierr; cudaError_t err; PetscFunctionBegin; ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayWrite(win,&warray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); try { wptr = thrust::device_pointer_cast(warray); xptr = thrust::device_pointer_cast(xarray); yptr = thrust::device_pointer_cast(yarray); thrust::transform(xptr,xptr+n,yptr,wptr,thrust::multiplies<PetscScalar>()); err = WaitForCUDA();CHKERRCUDA(err); } catch (char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex); } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(win,&warray);CHKERRQ(ierr); ierr = PetscLogGpuFlops(n);CHKERRQ(ierr); PetscFunctionReturn(0); } /* should do infinity norm in cuda */ PetscErrorCode VecNorm_SeqCUDA(Vec xin,NormType type,PetscReal *z) { PetscErrorCode 
ierr; PetscInt n = xin->map->n; PetscBLASInt one = 1, bn = 0; const PetscScalar *xarray; cublasHandle_t cublasv2handle; cublasStatus_t cberr; cudaError_t err; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscBLASIntCast(n,&bn);CHKERRQ(ierr); if (type == NORM_2 || type == NORM_FROBENIUS) { ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cberr = cublasXnrm2(cublasv2handle,bn,xarray,one,z);CHKERRCUBLAS(cberr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuFlops(PetscMax(2.0*n-1,0.0));CHKERRQ(ierr); } else if (type == NORM_INFINITY) { int i; ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cberr = cublasIXamax(cublasv2handle,bn,xarray,one,&i);CHKERRCUBLAS(cberr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); if (bn) { PetscScalar zs; err = cudaMemcpy(&zs,xarray+i-1,sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(err); *z = PetscAbsScalar(zs); } else *z = 0.0; ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); } else if (type == NORM_1) { ierr = VecCUDAGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cberr = cublasXasum(cublasv2handle,bn,xarray,one,z);CHKERRCUBLAS(cberr); ierr = VecCUDARestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(PetscMax(n-1.0,0.0));CHKERRQ(ierr); } else if (type == NORM_1_AND_2) { ierr = VecNorm_SeqCUDA(xin,NORM_1,z);CHKERRQ(ierr); ierr = VecNorm_SeqCUDA(xin,NORM_2,z+1);CHKERRQ(ierr); } ierr = PetscLogGpuToCpu(sizeof(PetscReal));CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecDotNorm2_SeqCUDA(Vec s, Vec t, PetscScalar *dp, PetscScalar *nm) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecDot_SeqCUDA(s,t,dp);CHKERRQ(ierr); ierr = VecDot_SeqCUDA(t,t,nm);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecDestroy_SeqCUDA(Vec v) { PetscErrorCode ierr; cudaError_t cerr; Vec_CUDA *veccuda = (Vec_CUDA*)v->spptr; PetscFunctionBegin; if (v->spptr) { if (veccuda->GPUarray_allocated) { #if defined(PETSC_HAVE_NVSHMEM) if (veccuda->nvshmem) { ierr = PetscNvshmemFree(veccuda->GPUarray_allocated);CHKERRQ(ierr); veccuda->nvshmem = PETSC_FALSE; } else #endif {cerr = cudaFree(veccuda->GPUarray_allocated);CHKERRCUDA(cerr);} veccuda->GPUarray_allocated = NULL; } if (veccuda->stream) { cerr = cudaStreamDestroy(veccuda->stream);CHKERRCUDA(cerr); } } ierr = VecDestroy_SeqCUDA_Private(v);CHKERRQ(ierr); ierr = PetscFree(v->spptr);CHKERRQ(ierr); PetscFunctionReturn(0); } #if defined(PETSC_USE_COMPLEX) struct conjugate { __host__ __device__ PetscScalar operator()(PetscScalar x) { return PetscConj(x); } }; #endif PetscErrorCode VecConjugate_SeqCUDA(Vec xin) { #if defined(PETSC_USE_COMPLEX) PetscScalar *xarray; PetscErrorCode ierr; PetscInt n = xin->map->n; thrust::device_ptr<PetscScalar> xptr; cudaError_t err; PetscFunctionBegin; ierr = VecCUDAGetArray(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); try { xptr = thrust::device_pointer_cast(xarray); thrust::transform(xptr,xptr+n,xptr,conjugate()); err = WaitForCUDA();CHKERRCUDA(err); } catch (char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex); } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArray(xin,&xarray);CHKERRQ(ierr); #else PetscFunctionBegin; #endif PetscFunctionReturn(0); } PetscErrorCode VecGetLocalVector_SeqCUDA(Vec v,Vec w) { 
PetscErrorCode ierr; cudaError_t err; PetscFunctionBegin; PetscValidHeaderSpecific(v,VEC_CLASSID,1); PetscValidHeaderSpecific(w,VEC_CLASSID,2); PetscCheckTypeName(w,VECSEQCUDA); PetscCheckTypeNames(v,VECSEQCUDA,VECMPICUDA); if (w->data) { if (((Vec_Seq*)w->data)->array_allocated) { if (w->pinned_memory) { ierr = PetscMallocSetCUDAHost();CHKERRQ(ierr); } ierr = PetscFree(((Vec_Seq*)w->data)->array_allocated);CHKERRQ(ierr); if (w->pinned_memory) { ierr = PetscMallocResetCUDAHost();CHKERRQ(ierr); w->pinned_memory = PETSC_FALSE; } } ((Vec_Seq*)w->data)->array = NULL; ((Vec_Seq*)w->data)->unplacedarray = NULL; } if (w->spptr) { PetscCheckTypeNames(v,VECSEQCUDA,VECMPICUDA); if (((Vec_CUDA*)w->spptr)->GPUarray) { err = cudaFree(((Vec_CUDA*)w->spptr)->GPUarray);CHKERRCUDA(err); ((Vec_CUDA*)w->spptr)->GPUarray = NULL; } if (((Vec_CUDA*)w->spptr)->stream) { err = cudaStreamDestroy(((Vec_CUDA*)w->spptr)->stream);CHKERRCUDA(err); } ierr = PetscFree(w->spptr);CHKERRQ(ierr); } if (v->petscnative) { ierr = PetscFree(w->data);CHKERRQ(ierr); w->data = v->data; w->offloadmask = v->offloadmask; w->pinned_memory = v->pinned_memory; w->spptr = v->spptr; ierr = PetscObjectStateIncrease((PetscObject)w);CHKERRQ(ierr); } else { ierr = VecGetArray(v,&((Vec_Seq*)w->data)->array);CHKERRQ(ierr); w->offloadmask = PETSC_OFFLOAD_CPU; ierr = VecCUDAAllocateCheck(w);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode VecRestoreLocalVector_SeqCUDA(Vec v,Vec w) { PetscErrorCode ierr; cudaError_t err; PetscFunctionBegin; PetscValidHeaderSpecific(v,VEC_CLASSID,1); PetscValidHeaderSpecific(w,VEC_CLASSID,2); PetscCheckTypeNames(v,VECSEQCUDA,VECMPICUDA); PetscCheckTypeName(w,VECSEQCUDA); if (v->petscnative) { v->data = w->data; v->offloadmask = w->offloadmask; v->pinned_memory = w->pinned_memory; v->spptr = w->spptr; w->data = 0; w->offloadmask = PETSC_OFFLOAD_UNALLOCATED; w->spptr = 0; } else { ierr = VecRestoreArray(v,&((Vec_Seq*)w->data)->array);CHKERRQ(ierr); if ((Vec_CUDA*)w->spptr) { err = cudaFree(((Vec_CUDA*)w->spptr)->GPUarray);CHKERRCUDA(err); ((Vec_CUDA*)w->spptr)->GPUarray = NULL; if (((Vec_CUDA*)v->spptr)->stream) { err = cudaStreamDestroy(((Vec_CUDA*)w->spptr)->stream);CHKERRCUDA(err); } ierr = PetscFree(w->spptr);CHKERRQ(ierr); } } PetscFunctionReturn(0); } struct petscrealpart : public thrust::unary_function<PetscScalar,PetscReal> { __host__ __device__ PetscReal operator()(PetscScalar x) { return PetscRealPart(x); } }; struct petscrealparti : public thrust::unary_function<thrust::tuple<PetscScalar, PetscInt>,thrust::tuple<PetscReal, PetscInt>> { __host__ __device__ thrust::tuple<PetscReal, PetscInt> operator()(thrust::tuple<PetscScalar, PetscInt> x) { return thrust::make_tuple(PetscRealPart(x.get<0>()), x.get<1>()); } }; struct petscmax : public thrust::binary_function<PetscReal,PetscReal,PetscReal> { __host__ __device__ PetscReal operator()(PetscReal x, PetscReal y) { return x < y ? y : x; } }; struct petscmaxi : public thrust::binary_function<thrust::tuple<PetscReal, PetscInt>,thrust::tuple<PetscReal, PetscInt>,thrust::tuple<PetscReal, PetscInt>> { __host__ __device__ thrust::tuple<PetscReal, PetscInt> operator()(thrust::tuple<PetscReal, PetscInt> x, thrust::tuple<PetscReal, PetscInt> y) { return x.get<0>() < y.get<0>() ? thrust::make_tuple(y.get<0>(), y.get<1>()) : (x.get<0>() != y.get<0>() ? thrust::make_tuple(x.get<0>(), x.get<1>()) : (x.get<1>() < y.get<1>() ? 
thrust::make_tuple(x.get<0>(), x.get<1>()) : thrust::make_tuple(y.get<0>(), y.get<1>()))); } }; struct petscmin : public thrust::binary_function<PetscReal,PetscReal,PetscReal> { __host__ __device__ PetscReal operator()(PetscReal x, PetscReal y) { return x < y ? x : y; } }; struct petscmini : public thrust::binary_function<thrust::tuple<PetscReal, PetscInt>,thrust::tuple<PetscReal, PetscInt>,thrust::tuple<PetscReal, PetscInt>> { __host__ __device__ thrust::tuple<PetscReal, PetscInt> operator()(thrust::tuple<PetscReal, PetscInt> x, thrust::tuple<PetscReal, PetscInt> y) { return x.get<0>() > y.get<0>() ? thrust::make_tuple(y.get<0>(), y.get<1>()) : (x.get<0>() != y.get<0>() ? thrust::make_tuple(x.get<0>(), x.get<1>()) : (x.get<1>() < y.get<1>() ? thrust::make_tuple(x.get<0>(), x.get<1>()) : thrust::make_tuple(y.get<0>(), y.get<1>()))); } }; PetscErrorCode VecMax_SeqCUDA(Vec v, PetscInt *p, PetscReal *m) { PetscErrorCode ierr; PetscInt n = v->map->n; const PetscScalar *av; thrust::device_ptr<const PetscScalar> avpt; PetscFunctionBegin; PetscCheckTypeNames(v,VECSEQCUDA,VECMPICUDA); if (!n) { *m = PETSC_MIN_REAL; if (p) *p = -1; PetscFunctionReturn(0); } ierr = VecCUDAGetArrayRead(v,&av);CHKERRQ(ierr); avpt = thrust::device_pointer_cast(av); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (p) { thrust::tuple<PetscReal,PetscInt> res(PETSC_MIN_REAL,-1); auto zibit = thrust::make_zip_iterator(thrust::make_tuple(avpt,thrust::counting_iterator<PetscInt>(0))); try { #if defined(PETSC_USE_COMPLEX) res = thrust::transform_reduce(zibit,zibit+n,petscrealparti(),res,petscmaxi()); #else res = thrust::reduce(zibit,zibit+n,res,petscmaxi()); #endif } catch (char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex); } *m = res.get<0>(); *p = res.get<1>(); } else { try { #if defined(PETSC_USE_COMPLEX) *m = thrust::transform_reduce(avpt,avpt+n,petscrealpart(),PETSC_MIN_REAL,petscmax()); #else *m = thrust::reduce(avpt,avpt+n,PETSC_MIN_REAL,petscmax()); #endif } catch (char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex); } } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(v,&av);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode VecMin_SeqCUDA(Vec v, PetscInt *p, PetscReal *m) { PetscErrorCode ierr; PetscInt n = v->map->n; const PetscScalar *av; thrust::device_ptr<const PetscScalar> avpt; PetscFunctionBegin; PetscCheckTypeNames(v,VECSEQCUDA,VECMPICUDA); if (!n) { *m = PETSC_MAX_REAL; if (p) *p = -1; PetscFunctionReturn(0); } ierr = VecCUDAGetArrayRead(v,&av);CHKERRQ(ierr); avpt = thrust::device_pointer_cast(av); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (p) { thrust::tuple<PetscReal,PetscInt> res(PETSC_MAX_REAL,-1); auto zibit = thrust::make_zip_iterator(thrust::make_tuple(avpt,thrust::counting_iterator<PetscInt>(0))); try { #if defined(PETSC_USE_COMPLEX) res = thrust::transform_reduce(zibit,zibit+n,petscrealparti(),res,petscmini()); #else res = thrust::reduce(zibit,zibit+n,res,petscmini()); #endif } catch (char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex); } *m = res.get<0>(); *p = res.get<1>(); } else { try { #if defined(PETSC_USE_COMPLEX) *m = thrust::transform_reduce(avpt,avpt+n,petscrealpart(),PETSC_MAX_REAL,petscmin()); #else *m = thrust::reduce(avpt,avpt+n,PETSC_MAX_REAL,petscmin()); #endif } catch (char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Thrust error: %s", ex); } } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(v,&av);CHKERRQ(ierr); PetscFunctionReturn(0); } #if 
defined(PETSC_HAVE_NVSHMEM) /* Free old CUDA array and re-allocate a new one from nvshmem symmetric heap. New array does not retain values in the old array. The offload mask is not changed. Note: the function is only meant to be used in MatAssemblyEnd_MPIAIJCUSPARSE. */ PetscErrorCode VecAllocateNVSHMEM_SeqCUDA(Vec v) { PetscErrorCode ierr; cudaError_t cerr; Vec_CUDA *veccuda = (Vec_CUDA*)v->spptr; PetscInt n; PetscFunctionBegin; cerr = cudaFree(veccuda->GPUarray_allocated);CHKERRCUDA(cerr); ierr = VecGetLocalSize(v,&n);CHKERRQ(ierr); ierr = MPIU_Allreduce(MPI_IN_PLACE,&n,1,MPIU_INT,MPI_MAX,PETSC_COMM_WORLD);CHKERRMPI(ierr); ierr = PetscNvshmemMalloc(n*sizeof(PetscScalar),(void**)&veccuda->GPUarray_allocated);CHKERRQ(ierr); veccuda->GPUarray = veccuda->GPUarray_allocated; veccuda->nvshmem = PETSC_TRUE; PetscFunctionReturn(0); } #endif
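Editor's note: the VecMDot_SeqCUDA_kernel2/3/4/8 kernels above all share one pattern: each block accumulates per-thread partial sums in shared memory, collapses them with a power-of-two tree reduction, writes one partial result per block into group_results, and the host sums the MDOT_WORKGROUP_NUM partials. The sketch below strips that pattern down to a single dot product; dot_partial, dot_host, WG_SIZE and WG_NUM are illustrative names (not PETSc API), error checking is omitted, and double again stands in for PetscScalar.

#include <cuda_runtime.h>

#define WG_SIZE 128   /* threads per block, a power of two as in the kernels above */
#define WG_NUM  128   /* number of blocks; one partial sum per block */

__global__ void dot_partial(const double *x, const double *y, int n, double *partial)
{
  __shared__ double buf[WG_SIZE];
  int chunk = (n - 1) / gridDim.x + 1;              /* entries handled by this block */
  int start = blockIdx.x * chunk;
  int stop  = min(start + chunk, n);                /* do not run past the vector end */
  double sum = 0.0;
  for (int i = start + threadIdx.x; i < stop; i += blockDim.x) sum += x[i] * y[i];
  buf[threadIdx.x] = sum;
  for (int stride = blockDim.x / 2; stride > 0; stride /= 2) { /* tree reduction in shared memory */
    __syncthreads();
    if (threadIdx.x < stride) buf[threadIdx.x] += buf[threadIdx.x + stride];
  }
  if (threadIdx.x == 0) partial[blockIdx.x] = buf[0];
}

double dot_host(const double *d_x, const double *d_y, int n)
{
  double *d_partial, h_partial[WG_NUM], z = 0.0;
  cudaMalloc((void **)&d_partial, WG_NUM * sizeof(double));
  dot_partial<<<WG_NUM, WG_SIZE>>>(d_x, d_y, n, d_partial);
  /* cudaMemcpy on the default stream waits for the kernel before copying */
  cudaMemcpy(h_partial, d_partial, WG_NUM * sizeof(double), cudaMemcpyDeviceToHost);
  for (int i = 0; i < WG_NUM; i++) z += h_partial[i]; /* final summation on the host */
  cudaFree(d_partial);
  return z;
}

In the full VecMDot_SeqCUDA the same group_results buffer is shared by up to eight y-vectors per kernel launch, which is why each kernel writes its per-block results at offsets of gridDim.x per vector and the host loop sums MDOT_WORKGROUP_NUM entries per z[j].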
b7f8ec47e5b3700d33d83299b945bab407219882.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <hipfft.h> #include <stdlib.h> #include <stdio.h> #include <time.h> #define dim 3 #define TPBx 16 // TPBx * TPBy = number of threads per block #define TPBy 8 #define TPBz 8 __global__ void real2complex(hipfftDoubleComplex *c, double *a, int n); __global__ void complex2real_scaled(double *a, hipfftDoubleComplex *c, double scale, int n); __global__ void solve_poisson(hipfftDoubleComplex *c, double *kx, double *ky, double *kz, int n); void exportData(const char *file, const double *X, const double *Y, const double *Z, const int n); void exportData2D(const char *xfile, const char *yfile, const char *zfile, const double *X, const double *Y, const double *Z, const double *data, const int n); void gaussian(double *bin, const double *X, const double *Y, const double *Z, const double *sPos, const double *var, const int sNum, const int n); void fixBC(double *result, const double *X, const double *Y, const double *Z, const double delta, const int n); // Boundary condition void getR(double *result, const double *data, const double delta, const int n); void getError(const double *data, const double *result, const int n); int main(){ ///////////////////////////// INITIZALIZATION //////////////////////////// int N, R, sNum; printf("Phase 1: Set Up The Environment for Testing\n"); printf("Input the range of x and y: "); // the range of x and y will be from -R to R scanf("%d", &R); printf("Input the number of samples: "); // the number of samples will be N * N scanf("%d", &N); printf("Allocating memory..."); fflush(stdout); clock_t startTime11 = clock(); char *uXFName = (char *)"uX_data.dat"; char *uYFName = (char *)"uY_data.dat"; char *uZFName = (char *)"uZ_data.dat"; char *rXFName = (char *)"rX_data.dat"; char *rYFName = (char *)"rY_data.dat"; char *rZFName = (char *)"rZ_data.dat"; char *RXFName = (char *)"RX_data.dat"; char *RYFName = (char *)"RY_data.dat"; char *RZFName = (char *)"RZ_data.dat"; double *X = (double *)malloc(sizeof(double) * N); double *Y = (double *)malloc(sizeof(double) * N); double *Z = (double *)malloc(sizeof(double) * N); double *kx = (double *)malloc(sizeof(double) * N); double *ky = (double *)malloc(sizeof(double) * N); double *kz = (double *)malloc(sizeof(double) * N); double *r = (double *)malloc(sizeof(double) * N * N * N); double *r2 = (double *)malloc(sizeof(double) * N * N * N); double *u = (double *)malloc(sizeof(double) * N * N * N); const double EPSILON = 8.85418782 * pow(10, -12); // Permitivity of free space const double PI = 4 * atan(1); double *kx_d, *ky_d, *kz_d, *r_d; hipfftDoubleComplex *r_complex_d; hipMalloc((void **)&kx_d, sizeof(double) * N); hipMalloc((void **)&ky_d, sizeof(double) * N); hipMalloc((void **)&kz_d, sizeof(double) * N); hipMalloc((void **)&r_d, sizeof(double) * N * N * N); hipMalloc((void **)&r_complex_d, sizeof(hipfftDoubleComplex) * N * N * N); int m = 0; double deltaX = (double)R / (N / 2); double deltaK = 1.0 / (2 * R); for(int i = N/-2; i < N/2; i++){ if(m < N){ X[m] = i * deltaX; Y[m] = i * deltaX; Z[m] = i * deltaX; } m += 1; } m = 0; for(int i = 0; i < N/2; i++){ if(m < N/2){ kx[m] = i * deltaK; kx[m+N/2] = (double)(i - N / 2) * deltaK; ky[m] = i * deltaK; ky[m+N/2] = (double)(i - N / 2) * deltaK; kz[m] = i * deltaK; kz[m+N/2] = (double)(i - N / 2) * deltaK; } m += 1; } clock_t endTime11 = clock(); printf("done!\n"); fflush(stdout); // Ask for essential parameters for generating a charge density 
distribution printf("Number of signal: "); scanf("%d", &sNum); double *sPos = (double *)malloc(sizeof(double) * dim * sNum); // Position of signal double *var = (double *)malloc(sizeof(double) * sNum); // Variances for(int s = 0; s < sNum; s++){ printf("Position of signal %d(e.g. 1.2 -3 0): ", s+1); scanf("%lf %lf %lf", &sPos[0+s*dim], &sPos[1+s*dim], &sPos[2+s*dim]); printf("Value of variance %d: ", s+1); scanf("%lf", &var[s]); } for(int s = 0; s < sNum; s++){ printf("Position %d = (%lf,%lf,%lf); Variance %d = %lf\n", s+1, sPos[0+s*dim], sPos[1+s*dim], sPos[2+s*dim], s+1, sNum, var[s]); } clock_t startTime12 = clock(); gaussian(r, X, Y, Z, sPos, var, sNum, N); // Generate a Gaussian Distribution for r clock_t endTime12 = clock(); for (int i = 0; i < N * N * N; i++){ u[i] = 0.0; } double totalTime11 = (double)(endTime11 - startTime11) / CLOCKS_PER_SEC; double totalTime12 = (double)(endTime12 - startTime12) / CLOCKS_PER_SEC; printf("Phase 1 ended\n"); printf("Time spent on allocating memory: %lf sec\n", totalTime11); printf("Time spent on generating function: %lf sec\n\n", totalTime12); ////////////////////////////////////////////////////////////////////////// printf("Phase 2: Evaluation\n"); printf("Copying data from the host to the device..."); fflush(stdout); clock_t startTime21 = clock(); hipMemcpy(kx_d, kx, sizeof(double) * N, hipMemcpyHostToDevice); hipMemcpy(ky_d, ky, sizeof(double) * N, hipMemcpyHostToDevice); hipMemcpy(kz_d, kz, sizeof(double) * N, hipMemcpyHostToDevice); hipMemcpy(r_d, r, sizeof(double) * N * N * N, hipMemcpyHostToDevice); hipfftHandle plan; if(hipfftPlan3d(&plan, N, N, N, HIPFFT_Z2Z) != HIPFFT_SUCCESS){ printf("\nCUFFT error: Plan creation failed!\n"); } // Compute the execution configuration dim3 dimBlock(TPBx, TPBy, TPBz); dim3 dimGrid(N / dimBlock.x, N / dimBlock.y, N / dimBlock.z); // Handle N not multiple of TPBx or TPBy if(N % TPBx != 0){ dimGrid.x += 1; } if(N % TPBy != 0){ dimGrid.y += 1; } if(N % TPBz != 0){ dimGrid.z += 1; } clock_t endTime21 = clock(); printf("done!\n"); printf("Start to solve the Poisson equation..."); fflush(stdout); clock_t startTime22 = clock(); const double PI2 = 4 * PI * PI; double scale = 1.0 / (N * N * N * PI2); hipLaunchKernelGGL(( real2complex), dim3(dimGrid), dim3(dimBlock), 0, 0, r_complex_d, r_d, N); if(hipfftExecZ2Z(plan, r_complex_d, r_complex_d, HIPFFT_FORWARD) != HIPFFT_SUCCESS){ printf("\nCUFFT error: ExecZ2Z Forward failed!\n"); } if(hipDeviceSynchronize() != hipSuccess){ printf("\nCuda error: Failed to synchronize\n"); } hipLaunchKernelGGL(( solve_poisson), dim3(dimGrid), dim3(dimBlock), 0, 0, r_complex_d, kx_d, ky_d, kz_d, N); if(hipfftExecZ2Z(plan, r_complex_d, r_complex_d, HIPFFT_BACKWARD) != HIPFFT_SUCCESS){ printf("\nCUFFT error: ExecZ2Z Backward failed!\n"); } if(hipDeviceSynchronize() != hipSuccess){ printf("\nCuda error: Failed to synchronize\n"); } hipLaunchKernelGGL(( complex2real_scaled), dim3(dimGrid), dim3(dimBlock), 0, 0, r_d, r_complex_d, scale, N); clock_t endTime22 = clock(); clock_t startTime23 = clock(); hipMemcpy(u, r_d, sizeof(double) * N * N * N, hipMemcpyDeviceToHost); clock_t endTime23 = clock(); printf("done!\n"); fflush(stdout); clock_t startTime24 = clock(); fixBC(u, X, Y, Z, deltaX, N); clock_t endTime24 = clock(); printf("Phase 2 ended\n"); double totalTime21 = (double)(endTime22 + endTime24 - startTime22 - startTime24) / CLOCKS_PER_SEC; double totalTime22 = (double)(endTime21 + endTime23 - startTime21 - endTime23) / CLOCKS_PER_SEC; printf("Time spent on calculation: %lf sec\n", 
totalTime21); printf("Time spent on data transfer: %lf sec\n\n", totalTime22); // Evaluate error printf("Phase 3: Error Evaluation And Data Exportation\n"); clock_t startTime41 = clock(); printf("delta = %lf\n", deltaX); printf("Evaluating the average error...\n"); fflush(stdout); getR(r2, u, deltaX, N); getError(r, r2, N); clock_t endTime41 = clock(); printf("done!\n"); fflush(stdout); printf("Exporting data...\n"); fflush(stdout); clock_t startTime42 = clock(); exportData2D(uXFName, uYFName, uZFName, X, Y, Z, u, N); exportData2D(rXFName, rYFName, rZFName, X, Y, Z, r, N); exportData2D(RXFName, RYFName, RZFName, X, Y, Z, r2, N); clock_t endTime42 = clock(); printf("done!\n"); fflush(stdout); printf("Phase 4 ended\n"); double totalTime41 = (double)(endTime41 - startTime41) / CLOCKS_PER_SEC; double totalTime42 = (double)(endTime42 - startTime42) / CLOCKS_PER_SEC; printf("Time spent on evaluating error: %lf sec\n", totalTime41); printf("Time spent on data exportation: %lf sec\n\n", totalTime42); // Destroy plan and clean up memory on device free(kx); free(ky); free(kz); free(X); free(Y); free(Z); free(r); free(u); free(sPos); free(var); hipfftDestroy(plan); hipFree(r_d); hipFree(r_complex_d); hipFree(kx_d); hipFree(ky_d); hipFree(kz_d); return 0; } __global__ void real2complex(hipfftDoubleComplex *c, double *a, int n){ /* compute idx and idy, the location of the element in the original NxN array */ int idxX = blockIdx.x * blockDim.x + threadIdx.x; int idxY = blockIdx.y * blockDim.y + threadIdx.y; int idxZ = blockIdx.z * blockDim.z + threadIdx.z; if(idxX < n && idxY < n && idxZ < n){ int idx = idxX + idxY * n + idxZ * n * n; c[idx].x = a[idx]; c[idx].y = 0.0; } } __global__ void complex2real_scaled(double *a, hipfftDoubleComplex *c, double scale, int n){ /* Compute index X and index Y, the location of the element in the original NxN array */ int idxX = blockIdx.x * blockDim.x + threadIdx.x; int idxY = blockIdx.y * blockDim.y + threadIdx.y; int idxZ = blockIdx.z * blockDim.z + threadIdx.z; if(idxX < n && idxY < n && idxZ < n){ int idx = idxX + idxY * n + idxZ * n * n; a[idx] = scale * c[idx].x; } } __global__ void solve_poisson(hipfftDoubleComplex *c, double *kx, double *ky, double *kz, int n){ /* compute idxX and idxY, the location of the element in the original NxN array */ int idxX = blockIdx.x * blockDim.x + threadIdx.x; int idxY = blockIdx.y * blockDim.y + threadIdx.y; int idxZ = blockIdx.z * blockDim.z + threadIdx.z; if(idxX < n && idxY < n && idxZ < n){ int idx = idxX + idxY * n + idxZ * n * n; double scale; if(idxX == 0 && idxY == 0 && idxZ == 0){ scale = 0.0; }else{ scale = -1.0 / (kx[idxX] * kx[idxX] + ky[idxY] * ky[idxY] + kz[idxZ] * kz[idxZ]); } c[idx].x *= scale; c[idx].y *= scale; } } void exportData(const char *file, const double *X, const double *Y, const double *Z, const double *data, const int n){ FILE *dataFile = fopen(file, "w"); printf("Exporting data to \"%s\"...", file); fflush(stdout); if(dataFile != NULL){ for(int k=0; k < n; k++){ for(int j = 0; j < n; j++){ for(int i = 0; i < n; i++){ fprintf(dataFile, "%lf\t%lf\t%lf\n", X[i], Y[j], Z[k], data[i+j*n+k*n*n]); } } } printf("done!\n"); printf("All data have been stored in \"%s\".\n", file); fflush(stdout); fclose(dataFile); }else{ printf("File not found!"); } } void exportData2D(const char *xfile, const char *yfile, const char *zfile, const double *X, const double *Y, const double *Z, const double *data, const int n){ FILE *xFile = fopen(xfile, "w"); if(xFile != NULL){ for(int i = 0; i < n; i++){ fprintf(xFile, 
"%lf\t%lf\n", X[i], data[i+(n/2)*n+(n/2)*n*n]); } printf("All data have been stored in \"%s\".\n", xfile); fclose(xFile); }else{ printf("xFile not found!"); } FILE *yFile = fopen(yfile, "w"); if(yFile != NULL){ for(int j = 0; j < n; j++){ fprintf(yFile, "%lf\t%lf\n", Y[j], data[(n/2)+j*n+(n/2)*n*n]); } printf("All data have been stored in \"%s\".\n", yfile); fclose(yFile); }else{ printf("yFile not found!"); } FILE *zFile = fopen(zfile, "w"); if(zFile != NULL){ for(int k = 0; k < n; k++){ fprintf(zFile, "%lf\t%lf\n", Z[k], data[(n/2)+(n/2)*n+k*n*n]); } printf("All data have been stored in \"%s\".\n", zfile); fclose(zFile); }else{ printf("zFile not found!"); } } void gaussian(double *bin, const double *X, const double *Y, const double *Z, const double *sPos, const double *var, const int sNum, const int n){ const double PI = 4 * atan(1); double x, y, z; double *scale = (double *)malloc(sizeof(double) * sNum); // Normalization factor // Generate required function printf("Generating density distribution..."); fflush(stdout); for(int s = 0; s < sNum; s++){ scale[s] = 10.0 / sqrt(2 * PI * var[s]); } for(int k=0; k < n ; k++){ for(int j = 0; j < n; j++){ for(int i = 0; i < n; i++){ bin[i+j*n+k*n*n] = 0; for(int s = 0; s < sNum; s++){ x = X[i] - sPos[0+s*dim]; y = Y[j] - sPos[1+s*dim]; z = Z[k] - sPos[2+s*dim]; bin[i+j*n+k*n*n] += scale[s] * exp(-(x * x + y * y + z * z)/(2 * var[s])); } } } } printf("done!\n"); fflush(stdout); } void fixBC(double *result, const double *X, const double *Y, const double *Z, const double delta, const int n){ double a, b, c, d; // Solution of laplace equation: ax + by + cz + d printf("Handling boundary condition..."); fflush(stdout); a = (double)(result[2+1*n+1*n*n] - result[0+1*n+1*n*n]) / (delta * 2); b = (double)(result[1+2*n+1*n*n] - result[1+0*n+1*n*n]) / (delta * 2); c = (double)(result[1+1*n+2*n*n] - result[1+1*n+0*n*n]) / (delta * 2); d = result[1+1*n+1*n*n] - a * X[1] - b * Y[1] - c * Z[1]; for(int k = 0; k < n ; k++){ for(int j = 0; j < n; j++){ for(int i = 0; i<n; i++){ result[i+j*n+k*n*n] -= a * X[i] + b * Y[j] + c * Z[k] + d; } } } printf("done!\n"); fflush(stdout); } void getR(double *result, const double *data, const double delta, const int n){ const double iDelta2 = 1.0 / (delta * delta); const double scale = iDelta2; // Fix boundary to be zero for(int i = 0; i < n; i++){ result[i+0*n+0*n*n] = 0.0; result[i+(n-1)*n+0*n*n] = 0.0; result[i+0*n+(n-1)*n*n] = 0.0; result[i+(n-1)*n+(n-1)*n*n] = 0.0; result[0+i*n+0*n*n] = 0.0; result[(n-1)+i*n+0*n*n] = 0.0; result[0+i*n+(n-1)*n*n] = 0.0; result[(n-1)+i*n+(n-1)*n*n] = 0.0; result[0+0*n+i*n*n] = 0.0; result[(n-1)+0*n+i*n*n] = 0.0; result[0+(n-1)*n+i*n*n] = 0.0; result[(n-1)+(n-1)*n+i*n*n] = 0.0; } // Finite Difference for(int k = 1; k < n - 1; k++){ for(int j = 1; j < n - 1; j++){ for(int i = 1; i < n - 1; i++){ result[i+j*n+k*n*n] = scale * (data[(i-1)+j*n+k*n*n] + data[(i+1)+j*n+k*n*n] + data[i+(j-1)*n+k*n*n] + data[i+(j+1)*n+k*n*n] + data[i+j*n+(k-1)*n*n] + data[i+j*n+(k+1)*n*n] - 6 * data[i+j*n+k*n*n]); } } } } void getError(const double *data, const double *result, const int n){ double error = 0.0; double totalError = 0.0; double averageError = 0.0; double maxError = 0.0; int count = 0; for(int k=0; k < n ; k++){ for(int j = 0; j < n; j++){ for(int i = 0; i < n; i++){ if (abs(result[i+j*n+k*n*n])>0 && abs(data[i+j*n+k*n*n])>0){ error = (double) abs(result[i+j*n+k*n*n] - data[i+j*n+k*n*n]); totalError += error; if(error > maxError){ maxError = error; } count += 1; } } } } printf("Max error: %lf\n", 
maxError); averageError = (double) totalError / count; printf("Average error = %lf\n", averageError); }
b7f8ec47e5b3700d33d83299b945bab407219882.cu
#include <cuda_runtime.h> #include <device_launch_parameters.h> #include <cufft.h> #include <stdlib.h> #include <stdio.h> #include <time.h> #define dim 3 #define TPBx 16 // TPBx * TPBy = number of threads per block #define TPBy 8 #define TPBz 8 __global__ void real2complex(cufftDoubleComplex *c, double *a, int n); __global__ void complex2real_scaled(double *a, cufftDoubleComplex *c, double scale, int n); __global__ void solve_poisson(cufftDoubleComplex *c, double *kx, double *ky, double *kz, int n); void exportData(const char *file, const double *X, const double *Y, const double *Z, const int n); void exportData2D(const char *xfile, const char *yfile, const char *zfile, const double *X, const double *Y, const double *Z, const double *data, const int n); void gaussian(double *bin, const double *X, const double *Y, const double *Z, const double *sPos, const double *var, const int sNum, const int n); void fixBC(double *result, const double *X, const double *Y, const double *Z, const double delta, const int n); // Boundary condition void getR(double *result, const double *data, const double delta, const int n); void getError(const double *data, const double *result, const int n); int main(){ ///////////////////////////// INITIZALIZATION //////////////////////////// int N, R, sNum; printf("Phase 1: Set Up The Environment for Testing\n"); printf("Input the range of x and y: "); // the range of x and y will be from -R to R scanf("%d", &R); printf("Input the number of samples: "); // the number of samples will be N * N scanf("%d", &N); printf("Allocating memory..."); fflush(stdout); clock_t startTime11 = clock(); char *uXFName = (char *)"uX_data.dat"; char *uYFName = (char *)"uY_data.dat"; char *uZFName = (char *)"uZ_data.dat"; char *rXFName = (char *)"rX_data.dat"; char *rYFName = (char *)"rY_data.dat"; char *rZFName = (char *)"rZ_data.dat"; char *RXFName = (char *)"RX_data.dat"; char *RYFName = (char *)"RY_data.dat"; char *RZFName = (char *)"RZ_data.dat"; double *X = (double *)malloc(sizeof(double) * N); double *Y = (double *)malloc(sizeof(double) * N); double *Z = (double *)malloc(sizeof(double) * N); double *kx = (double *)malloc(sizeof(double) * N); double *ky = (double *)malloc(sizeof(double) * N); double *kz = (double *)malloc(sizeof(double) * N); double *r = (double *)malloc(sizeof(double) * N * N * N); double *r2 = (double *)malloc(sizeof(double) * N * N * N); double *u = (double *)malloc(sizeof(double) * N * N * N); const double EPSILON = 8.85418782 * pow(10, -12); // Permitivity of free space const double PI = 4 * atan(1); double *kx_d, *ky_d, *kz_d, *r_d; cufftDoubleComplex *r_complex_d; cudaMalloc((void **)&kx_d, sizeof(double) * N); cudaMalloc((void **)&ky_d, sizeof(double) * N); cudaMalloc((void **)&kz_d, sizeof(double) * N); cudaMalloc((void **)&r_d, sizeof(double) * N * N * N); cudaMalloc((void **)&r_complex_d, sizeof(cufftDoubleComplex) * N * N * N); int m = 0; double deltaX = (double)R / (N / 2); double deltaK = 1.0 / (2 * R); for(int i = N/-2; i < N/2; i++){ if(m < N){ X[m] = i * deltaX; Y[m] = i * deltaX; Z[m] = i * deltaX; } m += 1; } m = 0; for(int i = 0; i < N/2; i++){ if(m < N/2){ kx[m] = i * deltaK; kx[m+N/2] = (double)(i - N / 2) * deltaK; ky[m] = i * deltaK; ky[m+N/2] = (double)(i - N / 2) * deltaK; kz[m] = i * deltaK; kz[m+N/2] = (double)(i - N / 2) * deltaK; } m += 1; } clock_t endTime11 = clock(); printf("done!\n"); fflush(stdout); // Ask for essential parameters for generating a charge density distribution printf("Number of signal: "); scanf("%d", &sNum); double 
*sPos = (double *)malloc(sizeof(double) * dim * sNum); // Position of signal double *var = (double *)malloc(sizeof(double) * sNum); // Variances for(int s = 0; s < sNum; s++){ printf("Position of signal %d(e.g. 1.2 -3 0): ", s+1); scanf("%lf %lf %lf", &sPos[0+s*dim], &sPos[1+s*dim], &sPos[2+s*dim]); printf("Value of variance %d: ", s+1); scanf("%lf", &var[s]); } for(int s = 0; s < sNum; s++){ printf("Position %d = (%lf,%lf,%lf); Variance %d = %lf\n", s+1, sPos[0+s*dim], sPos[1+s*dim], sPos[2+s*dim], s+1, sNum, var[s]); } clock_t startTime12 = clock(); gaussian(r, X, Y, Z, sPos, var, sNum, N); // Generate a Gaussian Distribution for r clock_t endTime12 = clock(); for (int i = 0; i < N * N * N; i++){ u[i] = 0.0; } double totalTime11 = (double)(endTime11 - startTime11) / CLOCKS_PER_SEC; double totalTime12 = (double)(endTime12 - startTime12) / CLOCKS_PER_SEC; printf("Phase 1 ended\n"); printf("Time spent on allocating memory: %lf sec\n", totalTime11); printf("Time spent on generating function: %lf sec\n\n", totalTime12); ////////////////////////////////////////////////////////////////////////// printf("Phase 2: Evaluation\n"); printf("Copying data from the host to the device..."); fflush(stdout); clock_t startTime21 = clock(); cudaMemcpy(kx_d, kx, sizeof(double) * N, cudaMemcpyHostToDevice); cudaMemcpy(ky_d, ky, sizeof(double) * N, cudaMemcpyHostToDevice); cudaMemcpy(kz_d, kz, sizeof(double) * N, cudaMemcpyHostToDevice); cudaMemcpy(r_d, r, sizeof(double) * N * N * N, cudaMemcpyHostToDevice); cufftHandle plan; if(cufftPlan3d(&plan, N, N, N, CUFFT_Z2Z) != CUFFT_SUCCESS){ printf("\nCUFFT error: Plan creation failed!\n"); } // Compute the execution configuration dim3 dimBlock(TPBx, TPBy, TPBz); dim3 dimGrid(N / dimBlock.x, N / dimBlock.y, N / dimBlock.z); // Handle N not multiple of TPBx or TPBy if(N % TPBx != 0){ dimGrid.x += 1; } if(N % TPBy != 0){ dimGrid.y += 1; } if(N % TPBz != 0){ dimGrid.z += 1; } clock_t endTime21 = clock(); printf("done!\n"); printf("Start to solve the Poisson equation..."); fflush(stdout); clock_t startTime22 = clock(); const double PI2 = 4 * PI * PI; double scale = 1.0 / (N * N * N * PI2); real2complex<<<dimGrid, dimBlock>>>(r_complex_d, r_d, N); if(cufftExecZ2Z(plan, r_complex_d, r_complex_d, CUFFT_FORWARD) != CUFFT_SUCCESS){ printf("\nCUFFT error: ExecZ2Z Forward failed!\n"); } if(cudaDeviceSynchronize() != cudaSuccess){ printf("\nCuda error: Failed to synchronize\n"); } solve_poisson<<<dimGrid, dimBlock>>>(r_complex_d, kx_d, ky_d, kz_d, N); if(cufftExecZ2Z(plan, r_complex_d, r_complex_d, CUFFT_INVERSE) != CUFFT_SUCCESS){ printf("\nCUFFT error: ExecZ2Z Backward failed!\n"); } if(cudaDeviceSynchronize() != cudaSuccess){ printf("\nCuda error: Failed to synchronize\n"); } complex2real_scaled<<<dimGrid, dimBlock>>>(r_d, r_complex_d, scale, N); clock_t endTime22 = clock(); clock_t startTime23 = clock(); cudaMemcpy(u, r_d, sizeof(double) * N * N * N, cudaMemcpyDeviceToHost); clock_t endTime23 = clock(); printf("done!\n"); fflush(stdout); clock_t startTime24 = clock(); fixBC(u, X, Y, Z, deltaX, N); clock_t endTime24 = clock(); printf("Phase 2 ended\n"); double totalTime21 = (double)(endTime22 + endTime24 - startTime22 - startTime24) / CLOCKS_PER_SEC; double totalTime22 = (double)(endTime21 + endTime23 - startTime21 - endTime23) / CLOCKS_PER_SEC; printf("Time spent on calculation: %lf sec\n", totalTime21); printf("Time spent on data transfer: %lf sec\n\n", totalTime22); // Evaluate error printf("Phase 3: Error Evaluation And Data Exportation\n"); clock_t startTime41 = 
clock(); printf("delta = %lf\n", deltaX); printf("Evaluating the average error...\n"); fflush(stdout); getR(r2, u, deltaX, N); getError(r, r2, N); clock_t endTime41 = clock(); printf("done!\n"); fflush(stdout); printf("Exporting data...\n"); fflush(stdout); clock_t startTime42 = clock(); exportData2D(uXFName, uYFName, uZFName, X, Y, Z, u, N); exportData2D(rXFName, rYFName, rZFName, X, Y, Z, r, N); exportData2D(RXFName, RYFName, RZFName, X, Y, Z, r2, N); clock_t endTime42 = clock(); printf("done!\n"); fflush(stdout); printf("Phase 4 ended\n"); double totalTime41 = (double)(endTime41 - startTime41) / CLOCKS_PER_SEC; double totalTime42 = (double)(endTime42 - startTime42) / CLOCKS_PER_SEC; printf("Time spent on evaluating error: %lf sec\n", totalTime41); printf("Time spent on data exportation: %lf sec\n\n", totalTime42); // Destroy plan and clean up memory on device free(kx); free(ky); free(kz); free(X); free(Y); free(Z); free(r); free(u); free(sPos); free(var); cufftDestroy(plan); cudaFree(r_d); cudaFree(r_complex_d); cudaFree(kx_d); cudaFree(ky_d); cudaFree(kz_d); return 0; } __global__ void real2complex(cufftDoubleComplex *c, double *a, int n){ /* compute idx and idy, the location of the element in the original NxN array */ int idxX = blockIdx.x * blockDim.x + threadIdx.x; int idxY = blockIdx.y * blockDim.y + threadIdx.y; int idxZ = blockIdx.z * blockDim.z + threadIdx.z; if(idxX < n && idxY < n && idxZ < n){ int idx = idxX + idxY * n + idxZ * n * n; c[idx].x = a[idx]; c[idx].y = 0.0; } } __global__ void complex2real_scaled(double *a, cufftDoubleComplex *c, double scale, int n){ /* Compute index X and index Y, the location of the element in the original NxN array */ int idxX = blockIdx.x * blockDim.x + threadIdx.x; int idxY = blockIdx.y * blockDim.y + threadIdx.y; int idxZ = blockIdx.z * blockDim.z + threadIdx.z; if(idxX < n && idxY < n && idxZ < n){ int idx = idxX + idxY * n + idxZ * n * n; a[idx] = scale * c[idx].x; } } __global__ void solve_poisson(cufftDoubleComplex *c, double *kx, double *ky, double *kz, int n){ /* compute idxX and idxY, the location of the element in the original NxN array */ int idxX = blockIdx.x * blockDim.x + threadIdx.x; int idxY = blockIdx.y * blockDim.y + threadIdx.y; int idxZ = blockIdx.z * blockDim.z + threadIdx.z; if(idxX < n && idxY < n && idxZ < n){ int idx = idxX + idxY * n + idxZ * n * n; double scale; if(idxX == 0 && idxY == 0 && idxZ == 0){ scale = 0.0; }else{ scale = -1.0 / (kx[idxX] * kx[idxX] + ky[idxY] * ky[idxY] + kz[idxZ] * kz[idxZ]); } c[idx].x *= scale; c[idx].y *= scale; } } void exportData(const char *file, const double *X, const double *Y, const double *Z, const double *data, const int n){ FILE *dataFile = fopen(file, "w"); printf("Exporting data to \"%s\"...", file); fflush(stdout); if(dataFile != NULL){ for(int k=0; k < n; k++){ for(int j = 0; j < n; j++){ for(int i = 0; i < n; i++){ fprintf(dataFile, "%lf\t%lf\t%lf\n", X[i], Y[j], Z[k], data[i+j*n+k*n*n]); } } } printf("done!\n"); printf("All data have been stored in \"%s\".\n", file); fflush(stdout); fclose(dataFile); }else{ printf("File not found!"); } } void exportData2D(const char *xfile, const char *yfile, const char *zfile, const double *X, const double *Y, const double *Z, const double *data, const int n){ FILE *xFile = fopen(xfile, "w"); if(xFile != NULL){ for(int i = 0; i < n; i++){ fprintf(xFile, "%lf\t%lf\n", X[i], data[i+(n/2)*n+(n/2)*n*n]); } printf("All data have been stored in \"%s\".\n", xfile); fclose(xFile); }else{ printf("xFile not found!"); } FILE *yFile = fopen(yfile, 
"w"); if(yFile != NULL){ for(int j = 0; j < n; j++){ fprintf(yFile, "%lf\t%lf\n", Y[j], data[(n/2)+j*n+(n/2)*n*n]); } printf("All data have been stored in \"%s\".\n", yfile); fclose(yFile); }else{ printf("yFile not found!"); } FILE *zFile = fopen(zfile, "w"); if(zFile != NULL){ for(int k = 0; k < n; k++){ fprintf(zFile, "%lf\t%lf\n", Z[k], data[(n/2)+(n/2)*n+k*n*n]); } printf("All data have been stored in \"%s\".\n", zfile); fclose(zFile); }else{ printf("zFile not found!"); } } void gaussian(double *bin, const double *X, const double *Y, const double *Z, const double *sPos, const double *var, const int sNum, const int n){ const double PI = 4 * atan(1); double x, y, z; double *scale = (double *)malloc(sizeof(double) * sNum); // Normalization factor // Generate required function printf("Generating density distribution..."); fflush(stdout); for(int s = 0; s < sNum; s++){ scale[s] = 10.0 / sqrt(2 * PI * var[s]); } for(int k=0; k < n ; k++){ for(int j = 0; j < n; j++){ for(int i = 0; i < n; i++){ bin[i+j*n+k*n*n] = 0; for(int s = 0; s < sNum; s++){ x = X[i] - sPos[0+s*dim]; y = Y[j] - sPos[1+s*dim]; z = Z[k] - sPos[2+s*dim]; bin[i+j*n+k*n*n] += scale[s] * exp(-(x * x + y * y + z * z)/(2 * var[s])); } } } } printf("done!\n"); fflush(stdout); } void fixBC(double *result, const double *X, const double *Y, const double *Z, const double delta, const int n){ double a, b, c, d; // Solution of laplace equation: ax + by + cz + d printf("Handling boundary condition..."); fflush(stdout); a = (double)(result[2+1*n+1*n*n] - result[0+1*n+1*n*n]) / (delta * 2); b = (double)(result[1+2*n+1*n*n] - result[1+0*n+1*n*n]) / (delta * 2); c = (double)(result[1+1*n+2*n*n] - result[1+1*n+0*n*n]) / (delta * 2); d = result[1+1*n+1*n*n] - a * X[1] - b * Y[1] - c * Z[1]; for(int k = 0; k < n ; k++){ for(int j = 0; j < n; j++){ for(int i = 0; i<n; i++){ result[i+j*n+k*n*n] -= a * X[i] + b * Y[j] + c * Z[k] + d; } } } printf("done!\n"); fflush(stdout); } void getR(double *result, const double *data, const double delta, const int n){ const double iDelta2 = 1.0 / (delta * delta); const double scale = iDelta2; // Fix boundary to be zero for(int i = 0; i < n; i++){ result[i+0*n+0*n*n] = 0.0; result[i+(n-1)*n+0*n*n] = 0.0; result[i+0*n+(n-1)*n*n] = 0.0; result[i+(n-1)*n+(n-1)*n*n] = 0.0; result[0+i*n+0*n*n] = 0.0; result[(n-1)+i*n+0*n*n] = 0.0; result[0+i*n+(n-1)*n*n] = 0.0; result[(n-1)+i*n+(n-1)*n*n] = 0.0; result[0+0*n+i*n*n] = 0.0; result[(n-1)+0*n+i*n*n] = 0.0; result[0+(n-1)*n+i*n*n] = 0.0; result[(n-1)+(n-1)*n+i*n*n] = 0.0; } // Finite Difference for(int k = 1; k < n - 1; k++){ for(int j = 1; j < n - 1; j++){ for(int i = 1; i < n - 1; i++){ result[i+j*n+k*n*n] = scale * (data[(i-1)+j*n+k*n*n] + data[(i+1)+j*n+k*n*n] + data[i+(j-1)*n+k*n*n] + data[i+(j+1)*n+k*n*n] + data[i+j*n+(k-1)*n*n] + data[i+j*n+(k+1)*n*n] - 6 * data[i+j*n+k*n*n]); } } } } void getError(const double *data, const double *result, const int n){ double error = 0.0; double totalError = 0.0; double averageError = 0.0; double maxError = 0.0; int count = 0; for(int k=0; k < n ; k++){ for(int j = 0; j < n; j++){ for(int i = 0; i < n; i++){ if (abs(result[i+j*n+k*n*n])>0 && abs(data[i+j*n+k*n*n])>0){ error = (double) abs(result[i+j*n+k*n*n] - data[i+j*n+k*n*n]); totalError += error; if(error > maxError){ maxError = error; } count += 1; } } } } printf("Max error: %lf\n", maxError); averageError = (double) totalError / count; printf("Average error = %lf\n", averageError); }
effe14a571468e61121f493c3f43a2823f535648.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void leapstep(unsigned long n, double *x, double *y, double *z,
                         double *vx, double *vy, double *vz, double dt)
{
    const unsigned long serial = blockIdx.x * blockDim.x + threadIdx.x;
    if (serial < n) {
        x[serial] += dt * vx[serial];
        y[serial] += dt * vy[serial];
        z[serial] += dt * vz[serial];
    }
}
effe14a571468e61121f493c3f43a2823f535648.cu
#include "includes.h" __global__ void leapstep(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double dt) { const unsigned long serial = blockIdx.x * blockDim.x + threadIdx.x; if (serial < n){ x[serial] += dt * vx[serial]; y[serial] += dt * vy[serial]; z[serial] += dt * vz[serial]; } }
14baf3352454c3a2bf8a1340e193a1715f6aa8dd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>

__global__ void vecadd(float *a, float *b, float *c, int n)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < n)
        c[i] = a[i] + b[i];
}

int main()
{
    int n;
    scanf("%d", &n);
    // Host arrays must be float to match the device buffers.
    float a[n], b[n], c[n];
    for (int i = 0; i < n; i++) scanf("%f", &a[i]);
    for (int i = 0; i < n; i++) scanf("%f", &b[i]);

    float *da, *db, *dc;
    int size = n * sizeof(float);
    hipMalloc((void **)&da, size);
    hipMalloc((void **)&db, size);
    hipMalloc((void **)&dc, size);
    // Copy the full arrays, not just sizeof(int) bytes.
    hipMemcpy(da, a, size, hipMemcpyHostToDevice);
    hipMemcpy(db, b, size, hipMemcpyHostToDevice);

    // The grid is sized for 32 threads per block, so launch 32 threads per block.
    hipLaunchKernelGGL(vecadd, dim3(ceil(n / 32.0)), dim3(32), 0, 0, da, db, dc, n);

    hipMemcpy(c, dc, size, hipMemcpyDeviceToHost);
    for (int i = 0; i < n; i++) printf("%f ", c[i]);

    hipFree(da);
    hipFree(db);
    hipFree(dc);
    return 0;
}
14baf3352454c3a2bf8a1340e193a1715f6aa8dd.cu
#include <stdio.h>
#include <math.h>

__global__ void vecadd(float *a, float *b, float *c, int n)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < n)
        c[i] = a[i] + b[i];
}

int main()
{
    int n;
    scanf("%d", &n);
    // Host arrays must be float to match the device buffers.
    float a[n], b[n], c[n];
    for (int i = 0; i < n; i++) scanf("%f", &a[i]);
    for (int i = 0; i < n; i++) scanf("%f", &b[i]);

    float *da, *db, *dc;
    int size = n * sizeof(float);
    cudaMalloc((void **)&da, size);
    cudaMalloc((void **)&db, size);
    cudaMalloc((void **)&dc, size);
    // Copy the full arrays, not just sizeof(int) bytes.
    cudaMemcpy(da, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(db, b, size, cudaMemcpyHostToDevice);

    // The grid is sized for 32 threads per block, so launch 32 threads per block.
    vecadd<<<ceil(n / 32.0), 32>>>(da, db, dc, n);

    cudaMemcpy(c, dc, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; i++) printf("%f ", c[i]);

    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    return 0;
}
d8173d8bf3ad0759ad9a29eb1c82a71fcb50e259.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <helper_cuda.h>

__global__ void kernel()
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    printf("Hello world! I'm thread %i in block %i. My global thread id is %i out of %i\n",
           threadIdx.x, blockIdx.x, i, gridDim.x * blockDim.x);
}

int main(int argc, char **argv)
{
    const int device = 0;
    hipSetDevice(device);

    // Kernel launch
    hipLaunchKernelGGL(kernel, dim3(1), dim3(256), 0, 0);
    hipDeviceSynchronize();

    return 0;
}
d8173d8bf3ad0759ad9a29eb1c82a71fcb50e259.cu
#include <stdio.h>
#include <helper_cuda.h>

__global__ void kernel()
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    printf("Hello world! I'm thread %i in block %i. My global thread id is %i out of %i\n",
           threadIdx.x, blockIdx.x, i, gridDim.x * blockDim.x);
}

int main(int argc, char **argv)
{
    const int device = 0;
    cudaSetDevice(device);

    // Kernel launch
    kernel<<<1, 256>>>();
    cudaDeviceSynchronize();

    return 0;
}
819fec602e8f349dfabbbb95e277059e21121c1f.hip
// !!! This is a file automatically generated by hipify!!! /* * BSD 2-Clause License * * Copyright (c) 2020, Alessandro Capotondi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file saxpy.c * @author Alessandro Capotondi * @date 12 May 2020 * @brief Saxpy * * @see https://dolly.fim.unimore.it/2019/course/view.php?id=152 */ #include <assert.h> #include <time.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> #define gpuErrchk(ans) \ { \ gpuAssert((ans), __FILE__, __LINE__); \ } static inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } #define TWO02 (1 << 2) #define TWO04 (1 << 4) #define TWO08 (1 << 8) #ifndef N #define N (1 << 27) #endif #ifndef BLOCK_SIZE #define BLOCK_SIZE (512) #endif /* *SAXPY (host implementation) * y := a * x + y */ void host_saxpy(float * __restrict__ y, float a, float * __restrict__ x, int n) { #pragma omp parallel for simd schedule(simd: static) for (int i = 0; i < n; i++) { y[i] = a * x[i] + y[i]; } } __global__ void gpu_saxpy(float * __restrict__ y, float a, float * __restrict__ x, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = a * x[i] + y[i]; } int main(int argc, const char **argv) { int iret = 0; int n = N; float *h_x, *d_x; float *h_y, *d_y; float *h_z; float a = 101.0f / TWO02, b, c; struct timespec rt[2]; double wt; // walltime if (argc > 1) n = atoi(argv[1]); //TODO Update malloc to hipHostMalloc or hipMallocManaged (if necessary) if (NULL == (h_x = (float *)malloc(sizeof(float) * n))) { printf("error: memory allocation for 'x'\n"); iret = -1; } //TODO Update malloc to hipHostMalloc or hipMallocManaged (if necessary) if (NULL == (h_y = (float *)malloc(sizeof(float) * n))) { printf("error: memory allocation for 'y'\n"); iret = -1; } if (NULL == (h_z = (float *)malloc(sizeof(float) * n))) { printf("error: memory allocation for 'z'\n"); iret = -1; } if (0 != iret) { //TODO Update hipHostFree or hipFree (if necessary) free(h_x); //TODO Update hipHostFree or hipFree (if necessary) free(h_y); free(h_z); exit(EXIT_FAILURE); } //Init Data b = rand() % TWO04; c = rand() % TWO08; 
for (int i = 0; i < n; i++) { h_x[i] = b / (float)TWO02; h_y[i] = h_z[i] = c / (float)TWO04; } //TODO Remove if unecessary gpuErrchk(hipMalloc((void **)&d_x, sizeof(float) * n)); gpuErrchk(hipMalloc((void **)&d_y, sizeof(float) * n)); clock_gettime(CLOCK_REALTIME, rt + 0); //TODO Remove if unecessary gpuErrchk(hipMemcpy(d_x, h_x, sizeof(float) * n, hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_y, h_y, sizeof(float) * n, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( gpu_saxpy), dim3(((n + BLOCK_SIZE - 1) / BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, 0, d_y, a, d_x, n); gpuErrchk(hipPeekAtLastError()); //TODO Remove if unecessary gpuErrchk(hipMemcpy(h_y, d_y, sizeof(float) * n, hipMemcpyDeviceToHost)); clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("saxpy (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2 * n / wt); //Check Matematical Consistency clock_gettime(CLOCK_REALTIME, rt + 0); host_saxpy(h_z, a, h_x, n); clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("saxpy (Host): %9.3f sec %9.1f GFLOPS\n", wt, 2 * n / wt); for (int i = 0; i < n; ++i) { iret = *(int *)(h_y + i) ^ *(int *)(h_z + i); assert(iret == 0); } //TODO Update hipHostFree or hipFree (if necessary) free(h_x); //TODO Remove if unecessary gpuErrchk(hipFree(d_x)); //TODO Update hipHostFree or hipFree (if necessary) free(h_y); //TODO Remove if unecessary gpuErrchk(hipFree(d_y)); free(h_z); // CUDA exit -- needed to flush printf write buffer hipDeviceReset(); return 0; }
819fec602e8f349dfabbbb95e277059e21121c1f.cu
/* * BSD 2-Clause License * * Copyright (c) 2020, Alessandro Capotondi * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file saxpy.c * @author Alessandro Capotondi * @date 12 May 2020 * @brief Saxpy * * @see https://dolly.fim.unimore.it/2019/course/view.php?id=152 */ #include <assert.h> #include <time.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cuda_runtime.h> #define gpuErrchk(ans) \ { \ gpuAssert((ans), __FILE__, __LINE__); \ } static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } #define TWO02 (1 << 2) #define TWO04 (1 << 4) #define TWO08 (1 << 8) #ifndef N #define N (1 << 27) #endif #ifndef BLOCK_SIZE #define BLOCK_SIZE (512) #endif /* *SAXPY (host implementation) * y := a * x + y */ void host_saxpy(float * __restrict__ y, float a, float * __restrict__ x, int n) { #pragma omp parallel for simd schedule(simd: static) for (int i = 0; i < n; i++) { y[i] = a * x[i] + y[i]; } } __global__ void gpu_saxpy(float * __restrict__ y, float a, float * __restrict__ x, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = a * x[i] + y[i]; } int main(int argc, const char **argv) { int iret = 0; int n = N; float *h_x, *d_x; float *h_y, *d_y; float *h_z; float a = 101.0f / TWO02, b, c; struct timespec rt[2]; double wt; // walltime if (argc > 1) n = atoi(argv[1]); //TODO Update malloc to cudaMallocHost or cudaMallocManaged (if necessary) if (NULL == (h_x = (float *)malloc(sizeof(float) * n))) { printf("error: memory allocation for 'x'\n"); iret = -1; } //TODO Update malloc to cudaMallocHost or cudaMallocManaged (if necessary) if (NULL == (h_y = (float *)malloc(sizeof(float) * n))) { printf("error: memory allocation for 'y'\n"); iret = -1; } if (NULL == (h_z = (float *)malloc(sizeof(float) * n))) { printf("error: memory allocation for 'z'\n"); iret = -1; } if (0 != iret) { //TODO Update cudaFreeHost or cudaFree (if necessary) free(h_x); //TODO Update cudaFreeHost or cudaFree (if necessary) free(h_y); free(h_z); exit(EXIT_FAILURE); } //Init Data b = rand() % TWO04; c = rand() % TWO08; for (int i = 0; i < n; i++) { h_x[i] = b / 
(float)TWO02; h_y[i] = h_z[i] = c / (float)TWO04; } //TODO Remove if unecessary gpuErrchk(cudaMalloc((void **)&d_x, sizeof(float) * n)); gpuErrchk(cudaMalloc((void **)&d_y, sizeof(float) * n)); clock_gettime(CLOCK_REALTIME, rt + 0); //TODO Remove if unecessary gpuErrchk(cudaMemcpy(d_x, h_x, sizeof(float) * n, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_y, h_y, sizeof(float) * n, cudaMemcpyHostToDevice)); gpu_saxpy<<<((n + BLOCK_SIZE - 1) / BLOCK_SIZE), BLOCK_SIZE>>>(d_y, a, d_x, n); gpuErrchk(cudaPeekAtLastError()); //TODO Remove if unecessary gpuErrchk(cudaMemcpy(h_y, d_y, sizeof(float) * n, cudaMemcpyDeviceToHost)); clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("saxpy (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2 * n / wt); //Check Matematical Consistency clock_gettime(CLOCK_REALTIME, rt + 0); host_saxpy(h_z, a, h_x, n); clock_gettime(CLOCK_REALTIME, rt + 1); wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("saxpy (Host): %9.3f sec %9.1f GFLOPS\n", wt, 2 * n / wt); for (int i = 0; i < n; ++i) { iret = *(int *)(h_y + i) ^ *(int *)(h_z + i); assert(iret == 0); } //TODO Update cudaFreeHost or cudaFree (if necessary) free(h_x); //TODO Remove if unecessary gpuErrchk(cudaFree(d_x)); //TODO Update cudaFreeHost or cudaFree (if necessary) free(h_y); //TODO Remove if unecessary gpuErrchk(cudaFree(d_y)); free(h_z); // CUDA exit -- needed to flush printf write buffer cudaDeviceReset(); return 0; }
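/*
 * One possible completion of the TODO markers in the saxpy pair above, sketched as a
 * stand-alone program: the explicit host/device buffer pairs and cudaMemcpy calls are
 * replaced by managed allocations (cudaMallocManaged), so the same pointers are used on
 * both host and device. This is only an illustration of the unified-memory variant, not
 * the exercise's reference solution; the problem size default and block size differ from
 * the original and are illustrative.
 */
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define BLOCK_SIZE 512

__global__ void gpu_saxpy(float *__restrict__ y, float a, float *__restrict__ x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        y[i] = a * x[i] + y[i];
}

int main(int argc, char **argv)
{
    int n = (argc > 1) ? atoi(argv[1]) : (1 << 20);
    float a = 2.0f;

    // Managed allocations: no separate h_x/d_x pair and no cudaMemcpy needed.
    float *x = NULL, *y = NULL, *z = NULL;
    cudaMallocManaged(&x, sizeof(float) * n);
    cudaMallocManaged(&y, sizeof(float) * n);
    z = (float *)malloc(sizeof(float) * n);   // host-only reference copy

    for (int i = 0; i < n; ++i) {
        x[i] = 1.0f;
        y[i] = z[i] = 3.0f;
    }

    gpu_saxpy<<<(n + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(y, a, x, n);
    cudaDeviceSynchronize();   // required before the CPU touches y again

    // Host reference and bitwise check, as in the original exercise.
    for (int i = 0; i < n; ++i)
        z[i] = a * x[i] + z[i];
    for (int i = 0; i < n; ++i)
        assert(y[i] == z[i]);
    printf("saxpy (managed) OK, n = %d\n", n);

    cudaFree(x);
    cudaFree(y);
    free(z);
    return 0;
}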
0cd6597b2dbd71689db93c98e7aadc1804e461a3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "CorrelateDataBackward0Subtract_1d.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int nthreads = 1; int num = 1; int item = 1; int topwidth = XSIZE; int topheight = YSIZE; int topchannels = 1; int max_displacement = 1; int x_shift = 1; int neighborhood_grid_width = XSIZE; int kernel_radius = 1; int stride1 = 2; int stride2 = 2; int bottomwidth = XSIZE; int bottomheight = YSIZE; int pbottomwidth = XSIZE; int pbottomheight = YSIZE; int bottomchannels = 1; int bottomcount = 1; int pad_size = XSIZE*YSIZE; float *bottom0diff = NULL; hipMalloc(&bottom0diff, XSIZE*YSIZE); const float *bottom0 = NULL; hipMalloc(&bottom0, XSIZE*YSIZE); const float *bottom1 = NULL; hipMalloc(&bottom1, XSIZE*YSIZE); const float *topdiff = NULL; hipMalloc(&topdiff, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( CorrelateDataBackward0Subtract_1d), dim3(gridBlock),dim3(threadBlock), 0, 0, nthreads,num,item,topwidth,topheight,topchannels,max_displacement,x_shift,neighborhood_grid_width,kernel_radius,stride1,stride2,bottomwidth,bottomheight,pbottomwidth,pbottomheight,bottomchannels,bottomcount,pad_size,bottom0diff,bottom0,bottom1,topdiff); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( CorrelateDataBackward0Subtract_1d), dim3(gridBlock),dim3(threadBlock), 0, 0, nthreads,num,item,topwidth,topheight,topchannels,max_displacement,x_shift,neighborhood_grid_width,kernel_radius,stride1,stride2,bottomwidth,bottomheight,pbottomwidth,pbottomheight,bottomchannels,bottomcount,pad_size,bottom0diff,bottom0,bottom1,topdiff); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( CorrelateDataBackward0Subtract_1d), dim3(gridBlock),dim3(threadBlock), 0, 0, nthreads,num,item,topwidth,topheight,topchannels,max_displacement,x_shift,neighborhood_grid_width,kernel_radius,stride1,stride2,bottomwidth,bottomheight,pbottomwidth,pbottomheight,bottomchannels,bottomcount,pad_size,bottom0diff,bottom0,bottom1,topdiff); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
0cd6597b2dbd71689db93c98e7aadc1804e461a3.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "CorrelateDataBackward0Subtract_1d.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int nthreads = 1; int num = 1; int item = 1; int topwidth = XSIZE; int topheight = YSIZE; int topchannels = 1; int max_displacement = 1; int x_shift = 1; int neighborhood_grid_width = XSIZE; int kernel_radius = 1; int stride1 = 2; int stride2 = 2; int bottomwidth = XSIZE; int bottomheight = YSIZE; int pbottomwidth = XSIZE; int pbottomheight = YSIZE; int bottomchannels = 1; int bottomcount = 1; int pad_size = XSIZE*YSIZE; float *bottom0diff = NULL; cudaMalloc(&bottom0diff, XSIZE*YSIZE); const float *bottom0 = NULL; cudaMalloc(&bottom0, XSIZE*YSIZE); const float *bottom1 = NULL; cudaMalloc(&bottom1, XSIZE*YSIZE); const float *topdiff = NULL; cudaMalloc(&topdiff, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); CorrelateDataBackward0Subtract_1d<<<gridBlock,threadBlock>>>(nthreads,num,item,topwidth,topheight,topchannels,max_displacement,x_shift,neighborhood_grid_width,kernel_radius,stride1,stride2,bottomwidth,bottomheight,pbottomwidth,pbottomheight,bottomchannels,bottomcount,pad_size,bottom0diff,bottom0,bottom1,topdiff); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { CorrelateDataBackward0Subtract_1d<<<gridBlock,threadBlock>>>(nthreads,num,item,topwidth,topheight,topchannels,max_displacement,x_shift,neighborhood_grid_width,kernel_radius,stride1,stride2,bottomwidth,bottomheight,pbottomwidth,pbottomheight,bottomchannels,bottomcount,pad_size,bottom0diff,bottom0,bottom1,topdiff); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { CorrelateDataBackward0Subtract_1d<<<gridBlock,threadBlock>>>(nthreads,num,item,topwidth,topheight,topchannels,max_displacement,x_shift,neighborhood_grid_width,kernel_radius,stride1,stride2,bottomwidth,bottomheight,pbottomwidth,pbottomheight,bottomchannels,bottomcount,pad_size,bottom0diff,bottom0,bottom1,topdiff); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
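/*
 * Note on the benchmark harness above: the cudaMalloc/hipMalloc calls request
 * XSIZE*YSIZE bytes, while the pointers are used as float buffers. If the kernel
 * indexes XSIZE*YSIZE float elements, the allocations would need a sizeof(float)
 * factor, e.g. (sketch, assuming float element type):
 *
 *     float *bottom0diff = NULL;
 *     cudaMalloc(&bottom0diff, XSIZE * YSIZE * sizeof(float));
 *
 * The kernel source (CorrelateDataBackward0Subtract_1d.cu) is not part of this pair,
 * so the intended element count cannot be confirmed here.
 */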
e18b026b0e50856457228325af57450f3e846b5d.hip
// !!! This is a file automatically generated by hipify!!! /*! Count triangles using the per-edge linear search. Use one thread for each triangle counter through OpenMP. */ #include <fmt/format.h> #include <iostream> #include <roctracer/roctx.h> #include <omp.h> #include <sys/types.h> #include <unistd.h> #include "clara/clara.hpp" #include "pangolin/algorithm/tc_edge_linear.cuh" #include "pangolin/configure.hpp" #include "pangolin/cuda_cxx/rc_stream.hpp" #include "pangolin/file/edge_list_file.hpp" #include "pangolin/init.hpp" #include "pangolin/sparse/csr_coo.hpp" int main(int argc, char **argv) { using pangolin::RcStream; pangolin::init(); std::vector<int> gpus; std::string path; int iters = 1; bool help = false; bool debug = false; bool verbose = false; size_t dimBlock = 64; bool readMostly = false; bool accessedBy = false; bool prefetchAsync = false; clara::Parser cli; cli = cli | clara::Help(help); cli = cli | clara::Opt(debug)["--debug"]("print debug messages to stderr"); cli = cli | clara::Opt(verbose)["--verbose"]("print verbose messages to stderr"); cli = cli | clara::Opt(gpus, "ids")["-g"]("gpus to use"); cli = cli | clara::Opt(readMostly)["--read-mostly"]("mark data as read-mostly by all gpus before kernel"); cli = cli | clara::Opt(accessedBy)["--accessed-by"]("mark data as accessed-by all GPUs before kernel"); cli = cli | clara::Opt(prefetchAsync)["--prefetch-async"]("prefetch data to all GPUs before kernel"); cli = cli | clara::Opt(iters, "N")["-n"]("number of counts"); cli = cli | clara::Opt(dimBlock, "block-dim")["-b"]("Number of threads in a block"); cli = cli | clara::Arg(path, "graph file")("Path to adjacency list").required(); auto result = cli.parse(clara::Args(argc, argv)); if (!result) { LOG(error, "Error in command line: {}", result.errorMessage()); exit(1); } if (help) { std::cout << cli; return 0; } // set logging level if (verbose) { pangolin::logger::set_level(pangolin::logger::Level::TRACE); } else if (debug) { pangolin::logger::set_level(pangolin::logger::Level::DEBUG); } // log command line before much else happens { std::string cmd; for (int i = 0; i < argc; ++i) { if (i != 0) { cmd += " "; } cmd += argv[i]; } LOG(debug, cmd); } LOG(debug, "pangolin version: {}.{}.{}", PANGOLIN_VERSION_MAJOR, PANGOLIN_VERSION_MINOR, PANGOLIN_VERSION_PATCH); LOG(debug, "pangolin branch: {}", PANGOLIN_GIT_REFSPEC); LOG(debug, "pangolin sha: {}", PANGOLIN_GIT_HASH); LOG(debug, "pangolin changes: {}", PANGOLIN_GIT_LOCAL_CHANGES); #ifndef NDEBUG LOG(warn, "Not a release build"); #endif if (gpus.empty()) { LOG(warn, "no GPUs provided on command line, using GPU 0"); gpus.push_back(0); } // Check for unified memory support bool managed = true; { hipDeviceProp_t prop; for (auto gpu : gpus) { CUDA_RUNTIME(hipGetDeviceProperties(&prop, gpu)); // We check for concurrentManagedAccess, as devices with only the // managedAccess property have extra synchronization requirements. if (!prop.concurrentManagedAccess) { LOG(warn, "device {} does not support concurrentManagedAccess", gpu); } managed = managed && prop.concurrentManagedAccess; } if (managed) { LOG(debug, "all devices support concurrent managed access"); } else { LOG(warn, "at least one device does not support concurrent managed access. 
" "read-duplicate may not occur"); } } // Check host page tables for pagable memory access bool hostPageTables = false; { hipDeviceProp_t prop; for (auto gpu : gpus) { CUDA_RUNTIME(hipGetDeviceProperties(&prop, gpu)); // if non-zero, setAccessedBy has no effect if (prop.pageableMemoryAccessUsesHostPageTables) { LOG(warn, "device {} uses host page takes for pageable memory accesses", gpu); } hostPageTables = hostPageTables || prop.pageableMemoryAccessUsesHostPageTables; } } if (hostPageTables) { LOG(warn, "at least one device used host page tables (accessed-by has no " "effect, read-only pages not created on access)"); } else { LOG(debug, "no devices use host page tables"); } // set up GPUs roctxRangePush("setup"); for (auto gpu : gpus) { CUDA_RUNTIME(hipSetDevice(gpu)); CUDA_RUNTIME(hipFree(0)); } roctxRangePop(); // read data roctxRangePush("read data"); auto start = std::chrono::system_clock::now(); pangolin::EdgeListFile file(path); std::vector<pangolin::DiEdge<uint64_t>> edges; std::vector<pangolin::DiEdge<uint64_t>> fileEdges; while (file.get_edges(fileEdges, 10)) { edges.insert(edges.end(), fileEdges.begin(), fileEdges.end()); } double elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; LOG(info, "read_data time {}s", elapsed); roctxRangePop(); LOG(debug, "read {} edges", edges.size()); // create one stream per GPU roctxRangePush("create streams"); std::vector<RcStream> streams; for (auto gpu : gpus) { streams.push_back(RcStream(gpu)); } roctxRangePop(); // create csr and count `iters` times std::vector<double> times; uint64_t nnz; uint64_t tris; for (int i = 0; i < iters; ++i) { // create csr CUDA_RUNTIME(hipSetDevice(gpus[0])); roctxRangePush("create CSR"); start = std::chrono::system_clock::now(); auto upperTriangular = [](pangolin::DiEdge<uint64_t> e) { return e.src < e.dst; }; auto csr = pangolin::CSRCOO<uint64_t>::from_edges(edges.begin(), edges.end(), upperTriangular); roctxRangePop(); LOG(debug, "nnz = {}", csr.nnz()); elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; LOG(info, "create CSR time {}s", elapsed); // accessed-by roctxRangePush("accessed-by"); start = std::chrono::system_clock::now(); if (accessedBy) { for (const auto &gpu : gpus) { LOG(debug, "mark CSR accessed-by {}", gpu); csr.accessed_by(gpu); } } elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; roctxRangePop(); LOG(info, "accessed-by CSR time {}s", elapsed); // read-mostly roctxRangePush("read-mostly"); start = std::chrono::system_clock::now(); if (readMostly) { csr.read_mostly(); } elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; roctxRangePop(); LOG(info, "read-mostly CSR time {}s", elapsed); uint64_t total = 0; // total triangle count omp_set_num_threads(gpus.size()); start = std::chrono::system_clock::now(); #pragma omp parallel for for (size_t gpuIdx = 0; gpuIdx < gpus.size(); ++gpuIdx) { const int gpu = gpus[gpuIdx]; RcStream &stream = streams[gpuIdx]; CUDA_RUNTIME(hipSetDevice(gpu)); // prefetch if (prefetchAsync) { LOG(debug, "omp thread {}: prefetch csr to device {}", omp_get_thread_num(), gpu); roctxRangePush("prefetch"); csr.prefetch_async(gpu, stream.stream()); roctxRangePop(); } // count triangles roctxRangePush("count"); // create async counters LOG(debug, "omp thread {}: create device {} counter", omp_get_thread_num(), gpu); pangolin::LinearTC counter(gpu, stream); // determine the number of edges per gpu const size_t edgesPerGPU = (csr.nnz() + gpus.size() - 1) / gpus.size(); LOG(debug, "omp thread {}: {} edges per GPU", 
omp_get_thread_num(), edgesPerGPU); // launch counting operations const size_t edgeStart = edgesPerGPU * gpuIdx; const size_t edgeStop = ::min(edgeStart + edgesPerGPU, csr.nnz()); const size_t numEdges = edgeStop - edgeStart; LOG(debug, "omp thread {}: start async count on GPU {} ({} edges)", omp_get_thread_num(), counter.device(), numEdges); counter.count_async(csr.view(), edgeStart, numEdges, dimBlock); // wait for counting operations to finish LOG(debug, "omp thread {}: wait for counter on GPU {}", omp_get_thread_num(), counter.device()); counter.sync(); roctxRangePop(); #pragma omp atomic total += counter.count(); } // gpus elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; LOG(info, "prefetch/count time {}s", elapsed); LOG(info, "{} triangles ({} teps)", total, csr.nnz() / elapsed); times.push_back(elapsed); tris = total; nnz = csr.nnz(); } // iters std::cout << path << ",\t" << nnz << ",\t" << tris; for (const auto &t : times) { std::cout << ",\t" << t; } std::cout << std::endl; return 0; }
e18b026b0e50856457228325af57450f3e846b5d.cu
/*! Count triangles using the per-edge linear search. Use one thread for each triangle counter through OpenMP. */ #include <fmt/format.h> #include <iostream> #include <nvToolsExt.h> #include <omp.h> #include <sys/types.h> #include <unistd.h> #include "clara/clara.hpp" #include "pangolin/algorithm/tc_edge_linear.cuh" #include "pangolin/configure.hpp" #include "pangolin/cuda_cxx/rc_stream.hpp" #include "pangolin/file/edge_list_file.hpp" #include "pangolin/init.hpp" #include "pangolin/sparse/csr_coo.hpp" int main(int argc, char **argv) { using pangolin::RcStream; pangolin::init(); std::vector<int> gpus; std::string path; int iters = 1; bool help = false; bool debug = false; bool verbose = false; size_t dimBlock = 64; bool readMostly = false; bool accessedBy = false; bool prefetchAsync = false; clara::Parser cli; cli = cli | clara::Help(help); cli = cli | clara::Opt(debug)["--debug"]("print debug messages to stderr"); cli = cli | clara::Opt(verbose)["--verbose"]("print verbose messages to stderr"); cli = cli | clara::Opt(gpus, "ids")["-g"]("gpus to use"); cli = cli | clara::Opt(readMostly)["--read-mostly"]("mark data as read-mostly by all gpus before kernel"); cli = cli | clara::Opt(accessedBy)["--accessed-by"]("mark data as accessed-by all GPUs before kernel"); cli = cli | clara::Opt(prefetchAsync)["--prefetch-async"]("prefetch data to all GPUs before kernel"); cli = cli | clara::Opt(iters, "N")["-n"]("number of counts"); cli = cli | clara::Opt(dimBlock, "block-dim")["-b"]("Number of threads in a block"); cli = cli | clara::Arg(path, "graph file")("Path to adjacency list").required(); auto result = cli.parse(clara::Args(argc, argv)); if (!result) { LOG(error, "Error in command line: {}", result.errorMessage()); exit(1); } if (help) { std::cout << cli; return 0; } // set logging level if (verbose) { pangolin::logger::set_level(pangolin::logger::Level::TRACE); } else if (debug) { pangolin::logger::set_level(pangolin::logger::Level::DEBUG); } // log command line before much else happens { std::string cmd; for (int i = 0; i < argc; ++i) { if (i != 0) { cmd += " "; } cmd += argv[i]; } LOG(debug, cmd); } LOG(debug, "pangolin version: {}.{}.{}", PANGOLIN_VERSION_MAJOR, PANGOLIN_VERSION_MINOR, PANGOLIN_VERSION_PATCH); LOG(debug, "pangolin branch: {}", PANGOLIN_GIT_REFSPEC); LOG(debug, "pangolin sha: {}", PANGOLIN_GIT_HASH); LOG(debug, "pangolin changes: {}", PANGOLIN_GIT_LOCAL_CHANGES); #ifndef NDEBUG LOG(warn, "Not a release build"); #endif if (gpus.empty()) { LOG(warn, "no GPUs provided on command line, using GPU 0"); gpus.push_back(0); } // Check for unified memory support bool managed = true; { cudaDeviceProp prop; for (auto gpu : gpus) { CUDA_RUNTIME(cudaGetDeviceProperties(&prop, gpu)); // We check for concurrentManagedAccess, as devices with only the // managedAccess property have extra synchronization requirements. if (!prop.concurrentManagedAccess) { LOG(warn, "device {} does not support concurrentManagedAccess", gpu); } managed = managed && prop.concurrentManagedAccess; } if (managed) { LOG(debug, "all devices support concurrent managed access"); } else { LOG(warn, "at least one device does not support concurrent managed access. 
" "read-duplicate may not occur"); } } // Check host page tables for pagable memory access bool hostPageTables = false; { cudaDeviceProp prop; for (auto gpu : gpus) { CUDA_RUNTIME(cudaGetDeviceProperties(&prop, gpu)); // if non-zero, setAccessedBy has no effect if (prop.pageableMemoryAccessUsesHostPageTables) { LOG(warn, "device {} uses host page takes for pageable memory accesses", gpu); } hostPageTables = hostPageTables || prop.pageableMemoryAccessUsesHostPageTables; } } if (hostPageTables) { LOG(warn, "at least one device used host page tables (accessed-by has no " "effect, read-only pages not created on access)"); } else { LOG(debug, "no devices use host page tables"); } // set up GPUs nvtxRangePush("setup"); for (auto gpu : gpus) { CUDA_RUNTIME(cudaSetDevice(gpu)); CUDA_RUNTIME(cudaFree(0)); } nvtxRangePop(); // read data nvtxRangePush("read data"); auto start = std::chrono::system_clock::now(); pangolin::EdgeListFile file(path); std::vector<pangolin::DiEdge<uint64_t>> edges; std::vector<pangolin::DiEdge<uint64_t>> fileEdges; while (file.get_edges(fileEdges, 10)) { edges.insert(edges.end(), fileEdges.begin(), fileEdges.end()); } double elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; LOG(info, "read_data time {}s", elapsed); nvtxRangePop(); LOG(debug, "read {} edges", edges.size()); // create one stream per GPU nvtxRangePush("create streams"); std::vector<RcStream> streams; for (auto gpu : gpus) { streams.push_back(RcStream(gpu)); } nvtxRangePop(); // create csr and count `iters` times std::vector<double> times; uint64_t nnz; uint64_t tris; for (int i = 0; i < iters; ++i) { // create csr CUDA_RUNTIME(cudaSetDevice(gpus[0])); nvtxRangePush("create CSR"); start = std::chrono::system_clock::now(); auto upperTriangular = [](pangolin::DiEdge<uint64_t> e) { return e.src < e.dst; }; auto csr = pangolin::CSRCOO<uint64_t>::from_edges(edges.begin(), edges.end(), upperTriangular); nvtxRangePop(); LOG(debug, "nnz = {}", csr.nnz()); elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; LOG(info, "create CSR time {}s", elapsed); // accessed-by nvtxRangePush("accessed-by"); start = std::chrono::system_clock::now(); if (accessedBy) { for (const auto &gpu : gpus) { LOG(debug, "mark CSR accessed-by {}", gpu); csr.accessed_by(gpu); } } elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; nvtxRangePop(); LOG(info, "accessed-by CSR time {}s", elapsed); // read-mostly nvtxRangePush("read-mostly"); start = std::chrono::system_clock::now(); if (readMostly) { csr.read_mostly(); } elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; nvtxRangePop(); LOG(info, "read-mostly CSR time {}s", elapsed); uint64_t total = 0; // total triangle count omp_set_num_threads(gpus.size()); start = std::chrono::system_clock::now(); #pragma omp parallel for for (size_t gpuIdx = 0; gpuIdx < gpus.size(); ++gpuIdx) { const int gpu = gpus[gpuIdx]; RcStream &stream = streams[gpuIdx]; CUDA_RUNTIME(cudaSetDevice(gpu)); // prefetch if (prefetchAsync) { LOG(debug, "omp thread {}: prefetch csr to device {}", omp_get_thread_num(), gpu); nvtxRangePush("prefetch"); csr.prefetch_async(gpu, stream.stream()); nvtxRangePop(); } // count triangles nvtxRangePush("count"); // create async counters LOG(debug, "omp thread {}: create device {} counter", omp_get_thread_num(), gpu); pangolin::LinearTC counter(gpu, stream); // determine the number of edges per gpu const size_t edgesPerGPU = (csr.nnz() + gpus.size() - 1) / gpus.size(); LOG(debug, "omp thread {}: {} edges per GPU", 
omp_get_thread_num(), edgesPerGPU); // launch counting operations const size_t edgeStart = edgesPerGPU * gpuIdx; const size_t edgeStop = std::min(edgeStart + edgesPerGPU, csr.nnz()); const size_t numEdges = edgeStop - edgeStart; LOG(debug, "omp thread {}: start async count on GPU {} ({} edges)", omp_get_thread_num(), counter.device(), numEdges); counter.count_async(csr.view(), edgeStart, numEdges, dimBlock); // wait for counting operations to finish LOG(debug, "omp thread {}: wait for counter on GPU {}", omp_get_thread_num(), counter.device()); counter.sync(); nvtxRangePop(); #pragma omp atomic total += counter.count(); } // gpus elapsed = (std::chrono::system_clock::now() - start).count() / 1e9; LOG(info, "prefetch/count time {}s", elapsed); LOG(info, "{} triangles ({} teps)", total, csr.nnz() / elapsed); times.push_back(elapsed); tris = total; nnz = csr.nnz(); } // iters std::cout << path << ",\t" << nnz << ",\t" << tris; for (const auto &t : times) { std::cout << ",\t" << t; } std::cout << std::endl; return 0; }
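A minimal, self-contained sketch of the per-GPU edge partitioning used in the loop above, assuming only standard C++ (nnz and the GPU count are made-up values; pangolin's CSRCOO and LinearTC types are not used here). It shows why the last range is clamped with std::min before being handed to counter.count_async.

// Sketch: split [0, nnz) into contiguous, nearly equal ranges, one per GPU.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t nnz = 10;   // assumed edge count, for illustration
  const size_t numGpus = 3;  // assumed number of GPUs

  // ceiling division, same as (csr.nnz() + gpus.size() - 1) / gpus.size()
  const size_t edgesPerGPU = (nnz + numGpus - 1) / numGpus;

  for (size_t gpuIdx = 0; gpuIdx < numGpus; ++gpuIdx) {
    const size_t edgeStart = edgesPerGPU * gpuIdx;
    // clamp the final range so it never runs past nnz
    const size_t edgeStop = std::min(edgeStart + edgesPerGPU, (size_t)nnz);
    const size_t numEdges = edgeStop - edgeStart;
    std::printf("gpu %zu: edges [%zu, %zu) -> %zu edges\n",
                gpuIdx, edgeStart, edgeStop, numEdges);
  }
  return 0;
}

Each range is what the benchmark passes to counter.count_async, and the per-GPU partial counts are combined through the #pragma omp atomic addition into total.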
93592fa5e5688a0d2a52bd6028d24707dfe61207.hip
// !!! This is a file automatically generated by hipify!!! #include "gpu_func.h" #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> #include <iostream> #include "rocblas.h" #define BLOCK_SIZE 32 #define NUM_THREADS 256 #define MAX_NUM_BLOCK 65535 __global__ void device_add_one(int* d_result, int t) { *d_result = t + 1; } /* Just a dummy function that can be used to warm up GPU */ int useless_gpu_add_one(int t) { int result; int* d_result; checkCudaErrors(hipMalloc((void**)&d_result, 1 * sizeof(int))); event_pair timer; start_timer(&timer); hipLaunchKernelGGL(( device_add_one), dim3(1),dim3(1), 0, 0, d_result, t); check_launch("device_add_one"); double time = stop_timer(&timer); std::cout << "device_add_one took: " << time << " seconds" << std::endl; checkCudaErrors(hipMemcpy(&result, d_result, 1 * sizeof(int), hipMemcpyDeviceToHost)); return result; } __global__ void gpu_softmax_kernel(double *mat, const int M, const int N) { const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x; if (col < N) { double sum = 0.0; // iterate all classes for (size_t c = 0; c < M; ++c) { const unsigned int index = M * col + c; mat[index] = ::exp(mat[index]); sum += mat[index]; } for (size_t c = 0; c < M; ++c) { const unsigned int index = M * col + c; mat[index] /= sum; } } } void gpu_softmax(double *mat, const int M, const int N) { dim3 block(BLOCK_SIZE); const unsigned int grid_x = ceil(N / (float)block.x); dim3 grid(grid_x); hipLaunchKernelGGL(( gpu_softmax_kernel), dim3(grid), dim3(block), 0, 0, mat, M, N); }; // __global__ // void gpu_transpose_kernel(double *mat1, double *mat2, int M, int N) { // const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x; // const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y; // if (row < M && col < N) { // const unsigned int index = M * col + row; // const unsigned int new_index = N * row + col; // mat2[new_index] = mat1[index]; // } // } // void gpu_transpose(double *mat1, double *mat2, int M, int N) { // dim3 block(BLOCK_SIZE, NUM_THREADS/BLOCK_SIZE); // const unsigned int grid_x = ceil(M / (float)block.x); // const unsigned int grid_y = ceil(N / (float)block.y); // dim3 grid(grid_x, grid_y); // gpu_transpose_kernel<<<grid, block>>>(mat1, mat2, M, N); // } __global__ void gpu_linear_kernel(const double* __restrict__ mat1, const double* __restrict__ mat2, double* __restrict__ mat3, const double alpha, const double beta, const int M, const int N) { const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < M && col < N) { const unsigned int index = M * col + row; mat3[index] = alpha * mat1[index] + beta * mat2[index]; } } void gpu_linear(double *mat1, double *mat2, double *mat3, const double alpha, const double beta, const int M, const int N) { dim3 block(BLOCK_SIZE, NUM_THREADS/BLOCK_SIZE); const unsigned int grid_x = ceil(M / (float)block.x); const unsigned int grid_y = ceil(N / (float)block.y); dim3 grid(grid_x, grid_y); hipLaunchKernelGGL(( gpu_linear_kernel), dim3(grid), dim3(block), 0, 0, mat1, mat2, mat3, alpha, beta, M, N); } __global__ void gpu_one_minus_kernel(const double* __restrict__ mat1, double* __restrict__ mat2, const int M, const int N) { const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < M && col < N) { const unsigned int index = M * col + row; mat2[index] = 1.0 - mat1[index]; } } void gpu_one_minus(double *mat1, double *mat2, const 
int M, const int N) { dim3 block(BLOCK_SIZE, NUM_THREADS/BLOCK_SIZE); const unsigned int grid_x = ceil(M / (float)block.x); const unsigned int grid_y = ceil(N / (float)block.y); dim3 grid(grid_x, grid_y); hipLaunchKernelGGL(( gpu_one_minus_kernel), dim3(grid), dim3(block), 0, 0, mat1, mat2, M, N); } __global__ void gpu_row_sum_kernel(const double* __restrict__ mat1, double* __restrict__ mat2, const int M, const int N) { const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x; if (row < M) { double sum = 0.0; for (size_t col = 0; col < N; ++col) { sum += mat1[M * col + row]; } mat2[row] = sum; } } void gpu_row_sum(double *mat1, double *mat2, const int M, const int N) { dim3 block(BLOCK_SIZE); dim3 grid(ceil(N / (float)block.x)); hipLaunchKernelGGL(( gpu_row_sum_kernel), dim3(grid), dim3(block), 0, 0, mat1, mat2, M, N); } __global__ void gpu_elem_mult_kernel(const double* __restrict__ mat1, double* __restrict__ mat2, const double alpha, const int M, const int N) { const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < M && col < N) { const unsigned int index = M * col + row; mat2[index] *= alpha * mat1[index]; } } void gpu_elem_mult(double* mat1, double* mat2, const double alpha, const int M, const int N) { dim3 block(BLOCK_SIZE, NUM_THREADS/BLOCK_SIZE); const unsigned int grid_x = ceil(M / (float)block.x); const unsigned int grid_y = ceil(N / (float)block.y); dim3 grid(grid_x, grid_y); hipLaunchKernelGGL(( gpu_elem_mult_kernel), dim3(grid), dim3(block), 0, 0, mat1, mat2, alpha, M, N); } __global__ void gpu_GEMMSigmoid(const double* __restrict__ A, const double* __restrict__ B, double* __restrict__ C, const double alpha, const double beta, const int M, const int N, const int K) { // thread row and column within Csub const unsigned int row = threadIdx.x; const unsigned int col = threadIdx.y; // index within grid const unsigned int grid_row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int grid_col = blockIdx.y * blockDim.y + threadIdx.y; double value = 0.0; const unsigned int iter = ceil(K / float(BLOCK_SIZE)); for (int i = 0; i < iter; ++i) { // shared memory used to store Asub and Bsub respectively __shared__ double As[BLOCK_SIZE][BLOCK_SIZE+1]; __shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE+1]; // load Asub const unsigned int A_col = BLOCK_SIZE * i + col; if (grid_row < M && A_col < K) { As[row][col] = A[M * A_col + grid_row]; } // load Bsub const unsigned int B_row = row + BLOCK_SIZE * i; if (B_row < K && grid_col < N) { Bs[row][col] = B[K * grid_col + B_row]; } __syncthreads(); unsigned int num_elems = BLOCK_SIZE; if ((K - i * BLOCK_SIZE) < BLOCK_SIZE) { num_elems = K - i * BLOCK_SIZE; } for (int j = 0; j < num_elems; ++j) { value += As[row][j] * Bs[j][col]; } __syncthreads(); } if (grid_row < M && grid_col < N) { const unsigned int index = M * grid_col + grid_row; value = alpha * value + beta * C[index]; C[index] = 1.0 / (1.0 + ::exp(-value)); } } void GEMMSigmoid(double* A, double* B, double* C, const double alpha, const double beta, const int M, const int N, const int K) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); const unsigned int grid_x = ceil(M / (float)block.x); const unsigned int grid_y = ceil(N / (float)block.y); dim3 grid(grid_x, grid_y); hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); hipLaunchKernelGGL(( gpu_GEMMSigmoid), dim3(grid), dim3(block), 0, 0, A, B, C, alpha, beta, M, N, K); } __global__ void gpu_GEMMT1(double* A, double* B, double* C, const double alpha, const 
double beta, const int M, const int N, const int K) { // thread row and column within Csub const unsigned int row = threadIdx.x; const unsigned int col = threadIdx.y; // index within grid const unsigned int grid_row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int grid_col = blockIdx.y * blockDim.y + threadIdx.y; double value = 0; const unsigned int iter = ceil(K / float(BLOCK_SIZE)); for (int i = 0; i < iter; ++i) { // shared memory used to store Asub and Bsub respectively __shared__ double As[BLOCK_SIZE][BLOCK_SIZE+1]; __shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE+1]; // load Asub const unsigned int A_col = BLOCK_SIZE * i + col; if (grid_row < M && A_col < K) { As[row][col] = A[K * grid_row + A_col]; } // load Bsub const unsigned int B_row = row + BLOCK_SIZE * i; if (B_row < K && grid_col < N) { Bs[row][col] = B[K * grid_col + B_row]; } __syncthreads(); unsigned int num_elems = BLOCK_SIZE; if ((K - i * BLOCK_SIZE) < BLOCK_SIZE) { num_elems = K - i * BLOCK_SIZE; } for (int j = 0; j < num_elems; ++j) { value += As[row][j] * Bs[j][col]; } __syncthreads(); } if (grid_row < M && grid_col < N) { const unsigned int index = M * grid_col + grid_row; C[index] = alpha * value + beta * C[index]; } } void GEMMT1(double* A, double* B, double* C, const double alpha, const double beta, const int M, const int N, const int K) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); const unsigned int grid_x = ceil(M / (float)block.x); const unsigned int grid_y = ceil(N / (float)block.y); dim3 grid(grid_x, grid_y); hipLaunchKernelGGL(( gpu_GEMMT1), dim3(grid), dim3(block), 0, 0, A, B, C, alpha, beta, M, N, K); } __global__ void gpu_GEMMT2(const double* __restrict__ A, const double* __restrict__ B, double* __restrict__ C, const double alpha, const double beta, const int M, const int N, const int K) { // thread row and column within Csub const unsigned int row = threadIdx.x; const unsigned int col = threadIdx.y; // index within grid const unsigned int grid_row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int grid_col = blockIdx.y * blockDim.y + threadIdx.y; double value = 0.0; const unsigned int iter = ceil(K / float(BLOCK_SIZE)); for (int i = 0; i < iter; ++i) { // shared memory used to store Asub and Bsub respectively __shared__ double As[BLOCK_SIZE][BLOCK_SIZE+1]; __shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE+1]; // load Asub const unsigned int A_col = BLOCK_SIZE * i + col; if (grid_row < M && A_col < K) { As[row][col] = A[M * A_col + grid_row]; } // load Bsub (transpose) const unsigned int B_row = row + BLOCK_SIZE * i; if (B_row < K && grid_col < N) { Bs[row][col] = B[N * B_row + grid_col]; } __syncthreads(); unsigned int num_elems = BLOCK_SIZE; if ((K - i * BLOCK_SIZE) < BLOCK_SIZE) { num_elems = K - i * BLOCK_SIZE; } for (int j = 0; j < num_elems; ++j) { value += As[row][j] * Bs[j][col]; } __syncthreads(); } if (grid_row < M && grid_col < N) { const unsigned int index = M * grid_col + grid_row; C[index] = alpha * value + beta * C[index]; } } void GEMMT2(double* A, double* B, double* C, const double alpha, const double beta, const int M, const int N, const int K) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); const unsigned int grid_x = ceil(M / (float)block.x); const unsigned int grid_y = ceil(N / (float)block.y); dim3 grid(grid_x, grid_y); hipLaunchKernelGGL(( gpu_GEMMT2), dim3(grid), dim3(block), 0, 0, A, B, C, alpha, beta, M, N, K); } /* * Routine to perform an in-place GEMM operation, i.e., C := alpha*A*B + beta*C * A: (M, K), B: (K, N), C: (M, N) * All matrices are organized in column-major order. 
*/ /* Algorithm 2: * Shared memory */ __global__ void gpu_GEMM(const double* __restrict__ A, const double* __restrict__ B, double* __restrict__ C, const double alpha, const double beta, const int M, const int N, const int K) { // thread row and column within Csub const unsigned int row = threadIdx.x; const unsigned int col = threadIdx.y; // index within grid const unsigned int grid_row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int grid_col = blockIdx.y * blockDim.y + threadIdx.y; double value = 0.0; const unsigned int iter = ceil(K / float(BLOCK_SIZE)); for (int i = 0; i < iter; ++i) { // shared memory used to store Asub and Bsub respectively __shared__ double As[BLOCK_SIZE][BLOCK_SIZE+1]; __shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE+1]; // load Asub const unsigned int A_col = BLOCK_SIZE * i + col; if (grid_row < M && A_col < K) { As[row][col] = A[M * A_col + grid_row]; } // load Bsub const unsigned int B_row = row + BLOCK_SIZE * i; if (B_row < K && grid_col < N) { Bs[row][col] = B[K * grid_col + B_row]; } __syncthreads(); unsigned int num_elems = BLOCK_SIZE; if ((K - i * BLOCK_SIZE) < BLOCK_SIZE) { num_elems = K - i * BLOCK_SIZE; } for (int j = 0; j < num_elems; ++j) { value += As[row][j] * Bs[j][col]; } __syncthreads(); } if (grid_row < M && grid_col < N) { const unsigned int index = M * grid_col + grid_row; C[index] = alpha * value + beta * C[index]; } } int myGEMM(double* A, double* B, double* C, double* alpha, double* beta, int M, int N, int K) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); const unsigned int grid_x = ceil(M / (float)block.x); const unsigned int grid_y = ceil(N / (float)block.y); dim3 grid(grid_x, grid_y); hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); hipLaunchKernelGGL(( gpu_GEMM), dim3(grid), dim3(block), 0, 0, A, B, C, *alpha, *beta, M, N, K); return 0; } /* Algorithm 1: * Each thread computes one element of C by accumulating results into value */ __global__ void gpu_GEMM_1(double* A, double* B, double* C, double alpha, double beta, int M, int N, int K) { const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < M && col < N) { double value = 0.0; const unsigned int index = M * col + row; for (size_t i = 0; i < K; ++i) { value += alpha * A[M * i + row] * B[K * col + i]; } value += beta * C[index]; C[index] = value; } } int myGEMM_1(double* A, double* B, double* C, double* alpha, double* beta, int M, int N, int K) { dim3 block(BLOCK_SIZE, NUM_THREADS/BLOCK_SIZE); const unsigned int grid_x = ceil(M / (float)block.x); const unsigned int grid_y = ceil(N / (float)block.y); dim3 grid(grid_x, grid_y); hipLaunchKernelGGL(( gpu_GEMM_1), dim3(grid), dim3(block), 0, 0, A, B, C, *alpha, *beta, M, N, K); return 0; }
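A hedged host-side sketch of driving myGEMM above through the HIP runtime; the sizes and values are illustrative assumptions, and gpu_func.h is assumed to declare myGEMM with the signature defined above (device pointers for A, B, C; host pointers for alpha and beta; column-major storage).

// Sketch only: small column-major GEMM driven through the HIP runtime.
#include <hip/hip_runtime.h>
#include <vector>
#include "gpu_func.h"   // assumed to declare myGEMM as defined above

int main() {
  const int M = 4, N = 3, K = 2;   // illustrative sizes
  std::vector<double> hA(M * K, 1.0), hB(K * N, 2.0), hC(M * N, 0.0);
  double alpha = 1.0, beta = 0.0;

  double *dA = NULL, *dB = NULL, *dC = NULL;
  hipMalloc((void**)&dA, hA.size() * sizeof(double));
  hipMalloc((void**)&dB, hB.size() * sizeof(double));
  hipMalloc((void**)&dC, hC.size() * sizeof(double));
  hipMemcpy(dA, hA.data(), hA.size() * sizeof(double), hipMemcpyHostToDevice);
  hipMemcpy(dB, hB.data(), hB.size() * sizeof(double), hipMemcpyHostToDevice);
  hipMemcpy(dC, hC.data(), hC.size() * sizeof(double), hipMemcpyHostToDevice);

  // C := alpha*A*B + beta*C, all matrices column-major
  myGEMM(dA, dB, dC, &alpha, &beta, M, N, K);

  hipMemcpy(hC.data(), dC, hC.size() * sizeof(double), hipMemcpyDeviceToHost);
  hipFree(dA); hipFree(dB); hipFree(dC);
  return 0;
}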
93592fa5e5688a0d2a52bd6028d24707dfe61207.cu
#include "gpu_func.h" #include <cuda_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> #include <iostream> #include "cublas_v2.h" #define BLOCK_SIZE 32 #define NUM_THREADS 256 #define MAX_NUM_BLOCK 65535 __global__ void device_add_one(int* d_result, int t) { *d_result = t + 1; } /* Just a dummy function that can be used to warm up GPU */ int useless_gpu_add_one(int t) { int result; int* d_result; checkCudaErrors(cudaMalloc((void**)&d_result, 1 * sizeof(int))); event_pair timer; start_timer(&timer); device_add_one<<<1,1>>>(d_result, t); check_launch("device_add_one"); double time = stop_timer(&timer); std::cout << "device_add_one took: " << time << " seconds" << std::endl; checkCudaErrors(cudaMemcpy(&result, d_result, 1 * sizeof(int), cudaMemcpyDeviceToHost)); return result; } __global__ void gpu_softmax_kernel(double *mat, const int M, const int N) { const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x; if (col < N) { double sum = 0.0; // iterate all classes for (size_t c = 0; c < M; ++c) { const unsigned int index = M * col + c; mat[index] = std::exp(mat[index]); sum += mat[index]; } for (size_t c = 0; c < M; ++c) { const unsigned int index = M * col + c; mat[index] /= sum; } } } void gpu_softmax(double *mat, const int M, const int N) { dim3 block(BLOCK_SIZE); const unsigned int grid_x = ceil(N / (float)block.x); dim3 grid(grid_x); gpu_softmax_kernel<<<grid, block>>>(mat, M, N); }; // __global__ // void gpu_transpose_kernel(double *mat1, double *mat2, int M, int N) { // const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x; // const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y; // if (row < M && col < N) { // const unsigned int index = M * col + row; // const unsigned int new_index = N * row + col; // mat2[new_index] = mat1[index]; // } // } // void gpu_transpose(double *mat1, double *mat2, int M, int N) { // dim3 block(BLOCK_SIZE, NUM_THREADS/BLOCK_SIZE); // const unsigned int grid_x = ceil(M / (float)block.x); // const unsigned int grid_y = ceil(N / (float)block.y); // dim3 grid(grid_x, grid_y); // gpu_transpose_kernel<<<grid, block>>>(mat1, mat2, M, N); // } __global__ void gpu_linear_kernel(const double* __restrict__ mat1, const double* __restrict__ mat2, double* __restrict__ mat3, const double alpha, const double beta, const int M, const int N) { const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < M && col < N) { const unsigned int index = M * col + row; mat3[index] = alpha * mat1[index] + beta * mat2[index]; } } void gpu_linear(double *mat1, double *mat2, double *mat3, const double alpha, const double beta, const int M, const int N) { dim3 block(BLOCK_SIZE, NUM_THREADS/BLOCK_SIZE); const unsigned int grid_x = ceil(M / (float)block.x); const unsigned int grid_y = ceil(N / (float)block.y); dim3 grid(grid_x, grid_y); gpu_linear_kernel<<<grid, block>>>(mat1, mat2, mat3, alpha, beta, M, N); } __global__ void gpu_one_minus_kernel(const double* __restrict__ mat1, double* __restrict__ mat2, const int M, const int N) { const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < M && col < N) { const unsigned int index = M * col + row; mat2[index] = 1.0 - mat1[index]; } } void gpu_one_minus(double *mat1, double *mat2, const int M, const int N) { dim3 block(BLOCK_SIZE, NUM_THREADS/BLOCK_SIZE); const unsigned int grid_x = ceil(M / (float)block.x); const unsigned int grid_y = ceil(N / 
(float)block.y); dim3 grid(grid_x, grid_y); gpu_one_minus_kernel<<<grid, block>>>(mat1, mat2, M, N); } __global__ void gpu_row_sum_kernel(const double* __restrict__ mat1, double* __restrict__ mat2, const int M, const int N) { const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x; if (row < M) { double sum = 0.0; for (size_t col = 0; col < N; ++col) { sum += mat1[M * col + row]; } mat2[row] = sum; } } void gpu_row_sum(double *mat1, double *mat2, const int M, const int N) { dim3 block(BLOCK_SIZE); dim3 grid(ceil(N / (float)block.x)); gpu_row_sum_kernel<<<grid, block>>>(mat1, mat2, M, N); } __global__ void gpu_elem_mult_kernel(const double* __restrict__ mat1, double* __restrict__ mat2, const double alpha, const int M, const int N) { const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < M && col < N) { const unsigned int index = M * col + row; mat2[index] *= alpha * mat1[index]; } } void gpu_elem_mult(double* mat1, double* mat2, const double alpha, const int M, const int N) { dim3 block(BLOCK_SIZE, NUM_THREADS/BLOCK_SIZE); const unsigned int grid_x = ceil(M / (float)block.x); const unsigned int grid_y = ceil(N / (float)block.y); dim3 grid(grid_x, grid_y); gpu_elem_mult_kernel<<<grid, block>>>(mat1, mat2, alpha, M, N); } __global__ void gpu_GEMMSigmoid(const double* __restrict__ A, const double* __restrict__ B, double* __restrict__ C, const double alpha, const double beta, const int M, const int N, const int K) { // thread row and column within Csub const unsigned int row = threadIdx.x; const unsigned int col = threadIdx.y; // index within grid const unsigned int grid_row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int grid_col = blockIdx.y * blockDim.y + threadIdx.y; double value = 0.0; const unsigned int iter = ceil(K / float(BLOCK_SIZE)); for (int i = 0; i < iter; ++i) { // shared memory used to store Asub and Bsub respectively __shared__ double As[BLOCK_SIZE][BLOCK_SIZE+1]; __shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE+1]; // load Asub const unsigned int A_col = BLOCK_SIZE * i + col; if (grid_row < M && A_col < K) { As[row][col] = A[M * A_col + grid_row]; } // load Bsub const unsigned int B_row = row + BLOCK_SIZE * i; if (B_row < K && grid_col < N) { Bs[row][col] = B[K * grid_col + B_row]; } __syncthreads(); unsigned int num_elems = BLOCK_SIZE; if ((K - i * BLOCK_SIZE) < BLOCK_SIZE) { num_elems = K - i * BLOCK_SIZE; } for (int j = 0; j < num_elems; ++j) { value += As[row][j] * Bs[j][col]; } __syncthreads(); } if (grid_row < M && grid_col < N) { const unsigned int index = M * grid_col + grid_row; value = alpha * value + beta * C[index]; C[index] = 1.0 / (1.0 + std::exp(-value)); } } void GEMMSigmoid(double* A, double* B, double* C, const double alpha, const double beta, const int M, const int N, const int K) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); const unsigned int grid_x = ceil(M / (float)block.x); const unsigned int grid_y = ceil(N / (float)block.y); dim3 grid(grid_x, grid_y); cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); gpu_GEMMSigmoid<<<grid, block>>>(A, B, C, alpha, beta, M, N, K); } __global__ void gpu_GEMMT1(double* A, double* B, double* C, const double alpha, const double beta, const int M, const int N, const int K) { // thread row and column within Csub const unsigned int row = threadIdx.x; const unsigned int col = threadIdx.y; // index within grid const unsigned int grid_row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int grid_col = blockIdx.y * 
blockDim.y + threadIdx.y; double value = 0; const unsigned int iter = ceil(K / float(BLOCK_SIZE)); for (int i = 0; i < iter; ++i) { // shared memory used to store Asub and Bsub respectively __shared__ double As[BLOCK_SIZE][BLOCK_SIZE+1]; __shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE+1]; // load Asub const unsigned int A_col = BLOCK_SIZE * i + col; if (grid_row < M && A_col < K) { As[row][col] = A[K * grid_row + A_col]; } // load Bsub const unsigned int B_row = row + BLOCK_SIZE * i; if (B_row < K && grid_col < N) { Bs[row][col] = B[K * grid_col + B_row]; } __syncthreads(); unsigned int num_elems = BLOCK_SIZE; if ((K - i * BLOCK_SIZE) < BLOCK_SIZE) { num_elems = K - i * BLOCK_SIZE; } for (int j = 0; j < num_elems; ++j) { value += As[row][j] * Bs[j][col]; } __syncthreads(); } if (grid_row < M && grid_col < N) { const unsigned int index = M * grid_col + grid_row; C[index] = alpha * value + beta * C[index]; } } void GEMMT1(double* A, double* B, double* C, const double alpha, const double beta, const int M, const int N, const int K) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); const unsigned int grid_x = ceil(M / (float)block.x); const unsigned int grid_y = ceil(N / (float)block.y); dim3 grid(grid_x, grid_y); gpu_GEMMT1<<<grid, block>>>(A, B, C, alpha, beta, M, N, K); } __global__ void gpu_GEMMT2(const double* __restrict__ A, const double* __restrict__ B, double* __restrict__ C, const double alpha, const double beta, const int M, const int N, const int K) { // thread row and column within Csub const unsigned int row = threadIdx.x; const unsigned int col = threadIdx.y; // index within grid const unsigned int grid_row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int grid_col = blockIdx.y * blockDim.y + threadIdx.y; double value = 0.0; const unsigned int iter = ceil(K / float(BLOCK_SIZE)); for (int i = 0; i < iter; ++i) { // shared memory used to store Asub and Bsub respectively __shared__ double As[BLOCK_SIZE][BLOCK_SIZE+1]; __shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE+1]; // load Asub const unsigned int A_col = BLOCK_SIZE * i + col; if (grid_row < M && A_col < K) { As[row][col] = A[M * A_col + grid_row]; } // load Bsub (transpose) const unsigned int B_row = row + BLOCK_SIZE * i; if (B_row < K && grid_col < N) { Bs[row][col] = B[N * B_row + grid_col]; } __syncthreads(); unsigned int num_elems = BLOCK_SIZE; if ((K - i * BLOCK_SIZE) < BLOCK_SIZE) { num_elems = K - i * BLOCK_SIZE; } for (int j = 0; j < num_elems; ++j) { value += As[row][j] * Bs[j][col]; } __syncthreads(); } if (grid_row < M && grid_col < N) { const unsigned int index = M * grid_col + grid_row; C[index] = alpha * value + beta * C[index]; } } void GEMMT2(double* A, double* B, double* C, const double alpha, const double beta, const int M, const int N, const int K) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); const unsigned int grid_x = ceil(M / (float)block.x); const unsigned int grid_y = ceil(N / (float)block.y); dim3 grid(grid_x, grid_y); gpu_GEMMT2<<<grid, block>>>(A, B, C, alpha, beta, M, N, K); } /* * Routine to perform an in-place GEMM operation, i.e., C := alpha*A*B + beta*C * A: (M, K), B: (K, N), C: (M, N) * All matrices are organized in column-major order. 
*/ /* Algorithm 2: * Shared memory */ __global__ void gpu_GEMM(const double* __restrict__ A, const double* __restrict__ B, double* __restrict__ C, const double alpha, const double beta, const int M, const int N, const int K) { // thread row and column within Csub const unsigned int row = threadIdx.x; const unsigned int col = threadIdx.y; // index within grid const unsigned int grid_row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int grid_col = blockIdx.y * blockDim.y + threadIdx.y; double value = 0.0; const unsigned int iter = ceil(K / float(BLOCK_SIZE)); for (int i = 0; i < iter; ++i) { // shared memory used to store Asub and Bsub respectively __shared__ double As[BLOCK_SIZE][BLOCK_SIZE+1]; __shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE+1]; // load Asub const unsigned int A_col = BLOCK_SIZE * i + col; if (grid_row < M && A_col < K) { As[row][col] = A[M * A_col + grid_row]; } // load Bsub const unsigned int B_row = row + BLOCK_SIZE * i; if (B_row < K && grid_col < N) { Bs[row][col] = B[K * grid_col + B_row]; } __syncthreads(); unsigned int num_elems = BLOCK_SIZE; if ((K - i * BLOCK_SIZE) < BLOCK_SIZE) { num_elems = K - i * BLOCK_SIZE; } for (int j = 0; j < num_elems; ++j) { value += As[row][j] * Bs[j][col]; } __syncthreads(); } if (grid_row < M && grid_col < N) { const unsigned int index = M * grid_col + grid_row; C[index] = alpha * value + beta * C[index]; } } int myGEMM(double* A, double* B, double* C, double* alpha, double* beta, int M, int N, int K) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); const unsigned int grid_x = ceil(M / (float)block.x); const unsigned int grid_y = ceil(N / (float)block.y); dim3 grid(grid_x, grid_y); cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); gpu_GEMM<<<grid, block>>>(A, B, C, *alpha, *beta, M, N, K); return 0; } /* Algorithm 1: * Each thread computes one element of C by accumulating results into value */ __global__ void gpu_GEMM_1(double* A, double* B, double* C, double alpha, double beta, int M, int N, int K) { const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y; if (row < M && col < N) { double value = 0.0; const unsigned int index = M * col + row; for (size_t i = 0; i < K; ++i) { value += alpha * A[M * i + row] * B[K * col + i]; } value += beta * C[index]; C[index] = value; } } int myGEMM_1(double* A, double* B, double* C, double* alpha, double* beta, int M, int N, int K) { dim3 block(BLOCK_SIZE, NUM_THREADS/BLOCK_SIZE); const unsigned int grid_x = ceil(M / (float)block.x); const unsigned int grid_y = ceil(N / (float)block.y); dim3 grid(grid_x, grid_y); gpu_GEMM_1<<<grid, block>>>(A, B, C, *alpha, *beta, M, N, K); return 0; }
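As an assumed correctness check for the column-major kernels above (not part of the original file), a plain CPU reference for C := alpha*A*B + beta*C could look like this:

// Sketch: CPU reference GEMM with the same column-major layout as the kernels.
#include <cstddef>

void cpuGEMM(const double* A, const double* B, double* C,
             double alpha, double beta, int M, int N, int K) {
  for (int col = 0; col < N; ++col) {
    for (int row = 0; row < M; ++row) {
      double acc = 0.0;
      for (int k = 0; k < K; ++k) {
        // A(row, k) is A[M*k + row], B(k, col) is B[K*col + k]
        acc += A[(size_t)M * k + row] * B[(size_t)K * col + k];
      }
      C[(size_t)M * col + row] = alpha * acc + beta * C[(size_t)M * col + row];
    }
  }
}

After copying C back from the device, comparing against cpuGEMM element-wise with a small tolerance is one way to validate myGEMM and the transposed variants.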
32cd671c3d3c5182c62fcad1baf46b03b077a4c5.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2008 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. 
*/ // includes, system #include <stdio.h> #include <assert.h> // Simple utility function to check for CUDA runtime errors void checkCUDAError(const char *msg); /////////////////////////////////////////////////////////////////////////////// // Program main /////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { // pointer and dimension for host memory int n, dimA; float *h_a; // pointers for device memory float *d_a, *d_b; // allocate and initialize host memory // Bonus: try using hipHostMalloc in place of malloc dimA = 8; //h_a = (float *) malloc(dimA*sizeof(float)); size_t memSize = dimA*sizeof(float); hipHostMalloc(&h_a, memSize); for (n=0; n<dimA; n++) { h_a[n] = (float) n; } // Part 1 of 5: allocate device memory hipMalloc(&d_a, memSize); hipMalloc(&d_b, memSize); // Part 2 of 5: host to device memory copy hipMemcpy(d_a, h_a, memSize, hipMemcpyHostToDevice); // Part 3 of 5: device to device memory copy hipMemcpy(d_b, d_a, memSize, hipMemcpyDeviceToDevice); // clear host memory for (n=0; n<dimA; n++) { h_a[n] = 0.f; } // Part 4 of 5: device to host copy hipMemcpy(h_a, d_b, memSize, hipMemcpyDeviceToHost); // Check for any CUDA errors checkCUDAError("hipMemcpy calls"); // verify the data on the host is correct for (n=0; n<dimA; n++) { assert(h_a[n] == (float) n); } // Part 5 of 5: free device memory pointers d_a and d_b hipFree(d_a); hipFree(d_b); // Check for any CUDA errors checkCUDAError("hipFree"); // free host memory pointer h_a // Bonus: be sure to use hipHostFree for memory allocated with hipHostMalloc hipHostFree(h_a); //free(h_a); // If the program makes it this far, then the results are correct and // there are no run-time errors. Good work! printf("Correct!\n"); return 0; } void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err) ); exit(-1); } }
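The calls above discard their hipError_t return values; a small checked-call helper (a sketch with a hypothetical HIP_CHECK name, not part of the original exercise) can surface failures at the call site:

// Sketch: wrap HIP runtime calls so a failing call reports file/line and exits.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define HIP_CHECK(call)                                                   \
  do {                                                                    \
    hipError_t err_ = (call);                                             \
    if (err_ != hipSuccess) {                                             \
      fprintf(stderr, "HIP error %s at %s:%d\n",                          \
              hipGetErrorString(err_), __FILE__, __LINE__);               \
      exit(-1);                                                           \
    }                                                                     \
  } while (0)

// Example use, mirroring the copies above:
//   HIP_CHECK(hipMemcpy(d_a, h_a, memSize, hipMemcpyHostToDevice));
//   HIP_CHECK(hipMemcpy(d_b, d_a, memSize, hipMemcpyDeviceToDevice));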
32cd671c3d3c5182c62fcad1baf46b03b077a4c5.cu
/* * Copyright 1993-2008 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ // includes, system #include <stdio.h> #include <assert.h> // Simple utility function to check for CUDA runtime errors void checkCUDAError(const char *msg); /////////////////////////////////////////////////////////////////////////////// // Program main /////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { // pointer and dimension for host memory int n, dimA; float *h_a; // pointers for device memory float *d_a, *d_b; // allocate and initialize host memory // Bonus: try using cudaMallocHost in place of malloc dimA = 8; //h_a = (float *) malloc(dimA*sizeof(float)); size_t memSize = dimA*sizeof(float); cudaMallocHost(&h_a, memSize); for (n=0; n<dimA; n++) { h_a[n] = (float) n; } // Part 1 of 5: allocate device memory cudaMalloc(&d_a, memSize); cudaMalloc(&d_b, memSize); // Part 2 of 5: host to device memory copy cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice); // Part 3 of 5: device to device memory copy cudaMemcpy(d_b, d_a, memSize, cudaMemcpyDeviceToDevice); // clear host memory for (n=0; n<dimA; n++) { h_a[n] = 0.f; } // Part 4 of 5: device to host copy cudaMemcpy(h_a, d_b, memSize, cudaMemcpyDeviceToHost); // Check for any CUDA errors checkCUDAError("cudaMemcpy calls"); // verify the data on the host is correct for (n=0; n<dimA; n++) { assert(h_a[n] == (float) n); } // Part 5 of 5: free device memory pointers d_a and d_b cudaFree(d_a); cudaFree(d_b); // Check for any CUDA errors checkCUDAError("cudaFree"); // free host memory pointer h_a // Bonus: be sure to use cudaFreeHost for memory allocated with cudaMallocHost cudaFreeHost(h_a); //free(h_a); // If the program makes it this far, then the results are correct and // there are no run-time errors. Good work! 
printf("Correct!\n"); return 0; } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err) ); exit(-1); } }
001c9ec91a21fd255308d22e2fbb911ac56a1876.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" // #pragma once using namespace std; #define NUM_THREADS_PER_BLOCK 512 int* create_shifts (char* pattern); int linear_horspool_match (char* text, char* pattern, int* shift_table, unsigned int* num_matches, int chunk_size, int num_chunks, int text_size, int pat_len, int myId); /* * Driver function * argv[0] is target pattern string * argv[1] is text path */ __global__ void horspool_match (char* text, char* pattern, int* shift_table, unsigned int* num_matches, int chunk_size, int num_chunks, int text_size, int pat_len) { const int TABLE_SIZ = 126; int count = 0; int myId = threadIdx.x + blockDim.x * blockIdx.x; if(myId > num_chunks){ //if thread is an invalid thread return; } int text_length = (chunk_size * myId) + chunk_size + pat_len - 1; // don't need to check first pattern_length - 1 characters int i = (myId*chunk_size) + pat_len - 1; int k = 0; while(i < text_length) { // reset matched character count k = 0; if (i >= text_size) { // break out if i tries to step past text length break; } if (text[i] >= TABLE_SIZ || text[i] < 0) { // move to next char if unknown char (Unicode, etc.) ++i; } else { while(k <= pat_len - 1 && pattern[pat_len - 1 - k] == text[i - k]) { // increment matched character count k++; } if(k == pat_len) { // increment pattern count, text index ++count; ++i; } else { // add on shift if known char i = i + shift_table[text[i]]; } } } atomicAdd(num_matches, count); }
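The host driver is not part of this file; below is a hedged sketch of how horspool_match above might be launched with hipLaunchKernelGGL. The chunking policy (chunk_size) and the helper name gpu_horspool_count are assumptions, not taken from the original code.

// Sketch only: copy text/pattern/shift table to the device and launch horspool_match.
#include <hip/hip_runtime.h>

unsigned int gpu_horspool_count(const char* text, int text_size,
                                const char* pattern, int pat_len,
                                const int* shift_table /* 126 entries */) {
  const int chunk_size = 4096;   // assumed policy
  const int num_chunks = (text_size + chunk_size - 1) / chunk_size;
  const int num_blocks = (num_chunks + NUM_THREADS_PER_BLOCK - 1) / NUM_THREADS_PER_BLOCK;

  char *d_text, *d_pattern;
  int *d_shift;
  unsigned int *d_matches;
  hipMalloc((void**)&d_text, text_size);
  hipMalloc((void**)&d_pattern, pat_len);
  hipMalloc((void**)&d_shift, 126 * sizeof(int));
  hipMalloc((void**)&d_matches, sizeof(unsigned int));
  hipMemcpy(d_text, text, text_size, hipMemcpyHostToDevice);
  hipMemcpy(d_pattern, pattern, pat_len, hipMemcpyHostToDevice);
  hipMemcpy(d_shift, shift_table, 126 * sizeof(int), hipMemcpyHostToDevice);
  hipMemset(d_matches, 0, sizeof(unsigned int));

  hipLaunchKernelGGL(horspool_match, dim3(num_blocks), dim3(NUM_THREADS_PER_BLOCK), 0, 0,
                     d_text, d_pattern, d_shift, d_matches,
                     chunk_size, num_chunks, text_size, pat_len);

  unsigned int matches = 0;
  hipMemcpy(&matches, d_matches, sizeof(unsigned int), hipMemcpyDeviceToHost);
  hipFree(d_text); hipFree(d_pattern); hipFree(d_shift); hipFree(d_matches);
  return matches;
}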
001c9ec91a21fd255308d22e2fbb911ac56a1876.cu
#include "includes.h" // #pragma once using namespace std; #define NUM_THREADS_PER_BLOCK 512 int* create_shifts (char* pattern); int linear_horspool_match (char* text, char* pattern, int* shift_table, unsigned int* num_matches, int chunk_size, int num_chunks, int text_size, int pat_len, int myId); /* * Driver function * argv[0] is target pattern string * argv[1] is text path */ __global__ void horspool_match (char* text, char* pattern, int* shift_table, unsigned int* num_matches, int chunk_size, int num_chunks, int text_size, int pat_len) { const int TABLE_SIZ = 126; int count = 0; int myId = threadIdx.x + blockDim.x * blockIdx.x; if(myId > num_chunks){ //if thread is an invalid thread return; } int text_length = (chunk_size * myId) + chunk_size + pat_len - 1; // don't need to check first pattern_length - 1 characters int i = (myId*chunk_size) + pat_len - 1; int k = 0; while(i < text_length) { // reset matched character count k = 0; if (i >= text_size) { // break out if i tries to step past text length break; } if (text[i] >= TABLE_SIZ || text[i] < 0) { // move to next char if unknown char (Unicode, etc.) ++i; } else { while(k <= pat_len - 1 && pattern[pat_len - 1 - k] == text[i - k]) { // increment matched character count k++; } if(k == pat_len) { // increment pattern count, text index ++count; ++i; } else { // add on shift if known char i = i + shift_table[text[i]]; } } } atomicAdd(num_matches, count); }
eea09318c0bcb664d6e88f7c4a8cddedb326258a.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include "hip/hip_runtime.h" #include "hip/device_functions.h" #include "device_launch_parameters.h" #include <fstream> #include <assert.h> #include <cmath> #include <vector> #include "rocblas.h" #include <thrust/sort.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <utility> #include <hipsparse.h> #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include "hipsparse.h" #include "cusolver_common.h" #include "cusolverDn.h" #include "math_functions.h" #include <algorithm> #include <Eigen/Cholesky> #include <Eigen/Core> #include <Eigen/LU> #include <Eigen/Sparse> #include <Eigen/SparseCholesky> #include <windows.h> #include "CudaIncompleteCholesky.h" //#define CLEANUP(s) \ //do { \ // printf ("%s\n", s); \ // if (yHostPtr) free(yHostPtr); \ // if (zHostPtr) free(zHostPtr); \ // if (xIndHostPtr) free(xIndHostPtr); \ // if (xValHostPtr) free(xValHostPtr); \ // if (cooRowIndexHostPtr) free(cooRowIndexHostPtr);\ // if (cooColIndexHostPtr) free(cooColIndexHostPtr);\ // if (cooValHostPtr) free(cooValHostPtr); \ // if (y) hipFree(y); \ // if (z) hipFree(z); \ // if (xInd) hipFree(xInd); \ // if (xVal) hipFree(xVal); \ // if (csrRowPtr) hipFree(csrRowPtr); \ // if (cooRowIndex) hipFree(cooRowIndex); \ // if (cooColIndex) hipFree(cooColIndex); \ // if (cooVal) hipFree(cooVal); \ // if (descr) hipsparseDestroyMatDescr(descr);\ // if (handle) hipsparseDestroy(handle); \ // hipDeviceReset(); \ // fflush (stdout); \ //} while (0) //hipError_t cudaStat1, cudaStat2, cudaStat3, cudaStat4, cudaStat5, cudaStat6; //hipsparseStatus_t status; //hipsparseHandle_t handle = 0; //int linearSolverCHOL( // hipsolverDnHandle_t handle, // int n, // const double* Acopy, // int lda, // const double* b, // double* x) //{ // int bufferSize = 0; // int* info = NULL; // double* buffer = NULL; // double* A = NULL; // int h_info = 0; // double start, stop; // double time_solve; // hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_LOWER; // // hipsolverDnDpotrf_bufferSize(handle, uplo, n, (double*)Acopy, lda, &bufferSize); // // hipMalloc(&info, sizeof(int))); // hipMalloc(&buffer, sizeof(double) * bufferSize)); // hipMalloc(&A, sizeof(double) * lda * n); // // // // prepare a copy of A because potrf will overwrite A with L // hipMemcpy(A, Acopy, sizeof(double) * lda * n, hipMemcpyDeviceToDevice); // hipMemset(info, 0, sizeof(int)); // // start = second(); // start = second(); // // hipsolverDnDpotrf(handle, uplo, n, A, lda, buffer, bufferSize, info); // // hipMemcpy(&h_info, info, sizeof(int), hipMemcpyDeviceToHost); // // if (0 != h_info) { // fprintf(stderr, "Error: Cholesky factorization failed\n"); // } // // hipMemcpy(x, b, sizeof(double) * n, hipMemcpyDeviceToDevice); // // hipsolverDnDpotrs(handle, uplo, n, 1, A, lda, x, n, info); // // hipDeviceSynchronize(); // stop = second(); // // time_solve = stop - start; // fprintf(stdout, "timing: cholesky = %10.6f sec\n", time_solve); // // if (info) { hipFree(info); } // if (buffer) { hipFree(buffer); } // if (A) { hipFree(A); } // // return 0; //} //__global__ void print(int * pos, int size) { // for (int i = 0; i < size; i++) // printf("pos %d %d\n", i, pos[i]); //} //__global__ void print(double* pos, int size) { // for (int i = 0; i < size; i++) // printf("pos %d %f\n", i, pos[i]); //} using namespace std; using namespace Eigen; string fileName; int main() { int n, nnz; cin >> fileName; ifstream ifs(fileName); //getchar(); ifs >> n >> nnz; vector<Triplet<double>> 
Q; int a, b; double z; for (int i = 0; i < nnz; i++) { ifs >> a >> b >> z; // cout << a << b << z << endl; Q.push_back(Triplet<double>(a, b, z)); // cout << Q[i] << endl; } SparseMatrix<double> A(n, n); A.setFromTriplets(Q.begin(), Q.end()); VectorXd B(n), X(n); CudaIncompleteCholesky::CudaIncompleteCholesky<double> solver; for (int i = 0; i < n; i++) { ifs >> B(i); } LARGE_INTEGER t1, t2, tc; QueryPerformanceFrequency(&tc); QueryPerformanceCounter(&t1); solver.compute(A); X = solver.solve(B); QueryPerformanceCounter(&t2); printf("Use Time:%f\n", (t2.QuadPart - t1.QuadPart) * 1.0 / tc.QuadPart); std::cout << "Linear system solvers comparison " << std::endl; std::cout << " Relative error |Ax - b| / |b| " << std::endl; double relative_error_llt = (A * X - B).norm() / B.norm(); cout << relative_error_llt << endl; } //Eigen::SparseMatrix<double> A; //Eigen::SparseVector<double> B; //void eigenOpen(int* &csrRowPtr, int* &csrColInd, double* &csrVal, double* &x, int& n, int& nnz){ // using namespace std; // using namespace Eigen; // string fileName; // cin >> fileName; // ifstream ifs(fileName); // //cout << ifs.is_open() << endl; // //getchar(); // ifs >> n >> nnz; // vector<Triplet<double>> Q; // int a, b; // double z; // x = new double[n]; // for (int i = 0; i < nnz; i++) { // ifs >> a >> b >> z; // Q.push_back(Triplet<double>(a, b, z)); // } // A = SparseMatrix<double>(n, n); // B = SparseVector<double>(n); // A.setFromTriplets(Q.begin(), Q.end()); // csrRowPtr = new int[n + 1]; // csrColInd = new int[nnz]; // csrVal = new double[nnz]; // hipMemcpy(csrVal, A.valuePtr(), sizeof(double) * nnz, hipMemcpyHostToHost); // hipMemcpy(csrRowPtr, A.outerIndexPtr(), sizeof(int) * (n + 1), hipMemcpyHostToHost); // hipMemcpy(csrColInd, A.innerIndexPtr(), sizeof(int) * nnz, hipMemcpyHostToHost); // for (int i = 0; i < n; i++) { // ifs >> x[i]; // B.insert(i) = x[i]; // } // //cout << B << endl; // //getchar(); //} // //double errorCheck(double* y, int size) { // using namespace Eigen; // SparseVector<double> X(size); // for (int i = 0; i < size; i++) { // X.insert(i) = y[i]; // } // double error = (A * X - B).norm() / B.norm(); // return error; //} // //int main() { // hipsparseHandle_t handle; // hipsparseStatus_t status = hipsparseCreate(&handle); // int *csrRowPtr,*csrColInd, *d_csrRowPtr, *d_csrColInd; // int m = 3, nnz = 3; // double* d_csrVal, * csrVal, *x, *y, *z, *d_x, *d_y, *d_z; // // //csrRowPtr = new int[m+1]; // //csrColInd = new int[nnz]; // //csrVal = new double[nnz]; // //x = new double[m]; // //for (int i = 0; i < m; i++) { // // csrVal[i] = 2; // // csrRowPtr[i] = i; // // csrColInd[i] = i; // // x[i] = 2*(i+1); // //} // //csrRowPtr[m] = nnz; // eigenOpen(csrRowPtr, csrColInd, csrVal, x, m, nnz); // //printf("%d %d\n", csrRowPtr[0], csrRowPtr[1]); // hipMalloc((void**)& d_csrRowPtr, sizeof(int)*(m+1)); // hipMalloc((void**)& d_csrColInd, sizeof(int)* nnz); // hipMalloc((void**)& d_csrVal, sizeof(double)*nnz); // hipMalloc((void**)& d_x, sizeof(double) * m); // hipMalloc((void**)& d_y, sizeof(double) * m); // hipMalloc((void**)& d_z, sizeof(double) * m); // hipMemcpy(d_csrRowPtr, csrRowPtr, sizeof(int) * (m + 1), hipMemcpyHostToDevice); // // hipMemcpy(d_csrColInd, csrColInd, sizeof(int) * nnz, hipMemcpyHostToDevice); // hipMemcpy(d_csrVal, csrVal, sizeof(double) * nnz, hipMemcpyHostToDevice); // hipMemcpy(d_x, x, sizeof(double) * m, hipMemcpyHostToDevice); // //print << <1, 1 >> > (d_csrRowPtr, m + 1); // //print << <1, 1 >> > (d_csrVal, nnz); // //print << <1, 1 >> > (d_csrColInd, nnz); 
// // Suppose that A is m x m sparse matrix represented by CSR format, // // Assumption: // // - handle is already created by hipsparseCreate(), // // - (d_csrRowPtr, d_csrColInd, d_csrVal) is CSR of A on device memory, // // - d_x is right hand side vector on device memory, // // - d_y is solution vector on device memory. // // - d_z is intermediate result on device memory. // // hipsparseMatDescr_t descr_M = 0; // hipsparseMatDescr_t descr_L = 0; // csric02Info_t info_M = 0; // csrsv2Info_t info_L = 0; // csrsv2Info_t info_Lt = 0; // int pBufferSize_M; // int pBufferSize_L; // int pBufferSize_Lt; // int pBufferSize; // void* pBuffer = 0; // int structural_zero; // int numerical_zero; // const double alpha = 1.; // const hipsparseSolvePolicy_t policy_M = HIPSPARSE_SOLVE_POLICY_NO_LEVEL; // const hipsparseSolvePolicy_t policy_L = HIPSPARSE_SOLVE_POLICY_NO_LEVEL; // const hipsparseSolvePolicy_t policy_Lt = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; // const hipsparseOperation_t trans_L = HIPSPARSE_OPERATION_NON_TRANSPOSE; // const hipsparseOperation_t trans_Lt = HIPSPARSE_OPERATION_TRANSPOSE; // // // step 1: create a descriptor which contains // // - matrix M is base-1 // // - matrix L is base-1 // // - matrix L is lower triangular // // - matrix L has non-unit diagonal // LARGE_INTEGER t1, t2, tc; // QueryPerformanceFrequency(&tc); // QueryPerformanceCounter(&t1); // // hipsparseCreateMatDescr(&descr_M); // hipsparseSetMatIndexBase(descr_M, HIPSPARSE_INDEX_BASE_ZERO); // hipsparseSetMatType(descr_M, HIPSPARSE_MATRIX_TYPE_GENERAL); // // hipsparseCreateMatDescr(&descr_L); // hipsparseSetMatIndexBase(descr_L, HIPSPARSE_INDEX_BASE_ZERO); // hipsparseSetMatType(descr_L, HIPSPARSE_MATRIX_TYPE_GENERAL); // hipsparseSetMatFillMode(descr_L, HIPSPARSE_FILL_MODE_LOWER); // hipsparseSetMatDiagType(descr_L, HIPSPARSE_DIAG_TYPE_NON_UNIT); // // // step 2: create a empty info structure // // we need one info for csric02 and two info's for csrsv2 // hipsparseCreateCsric02Info(&info_M); // hipsparseCreateCsrsv2Info(&info_L); // hipsparseCreateCsrsv2Info(&info_Lt); // // // step 3: query how much memory used in csric02 and csrsv2, and allocate the buffer // hipsparseDcsric02_bufferSize(handle, m, nnz, // descr_M, d_csrVal, d_csrRowPtr, d_csrColInd, info_M, &pBufferSize_M); // hipsparseDcsrsv2_bufferSize(handle, trans_L, m, nnz, // descr_L, d_csrVal, d_csrRowPtr, d_csrColInd, info_L, &pBufferSize_L); // hipsparseDcsrsv2_bufferSize(handle, trans_Lt, m, nnz, // descr_L, d_csrVal, d_csrRowPtr, d_csrColInd, info_Lt, &pBufferSize_Lt); // // pBufferSize = max(pBufferSize_M, max(pBufferSize_L, pBufferSize_Lt)); // // // pBuffer returned by hipMalloc is automatically aligned to 128 bytes. // hipMalloc((void**)& pBuffer, pBufferSize); // // // step 4: perform analysis of incomplete Cholesky on M // // perform analysis of triangular solve on L // // perform analysis of triangular solve on L' // // The lower triangular part of M has the same sparsity pattern as L, so // // we can do analysis of csric02 and csrsv2 simultaneously. 
// // hipsparseDcsric02_analysis(handle, m, nnz, descr_M, // d_csrVal, d_csrRowPtr, d_csrColInd, info_M, // policy_M, pBuffer); // status = hipsparseXcsric02_zeroPivot(handle, info_M, &structural_zero); // if (HIPSPARSE_STATUS_ZERO_PIVOT == status) { // printf("A(%d,%d) is missing\n", structural_zero, structural_zero); // } // // hipsparseDcsrsv2_analysis(handle, trans_L, m, nnz, descr_L, // d_csrVal, d_csrRowPtr, d_csrColInd, // info_L, policy_L, pBuffer); // // hipsparseDcsrsv2_analysis(handle, trans_Lt, m, nnz, descr_L, // d_csrVal, d_csrRowPtr, d_csrColInd, // info_Lt, policy_Lt, pBuffer); // // // step 5: M = L * L' // hipsparseDcsric02(handle, m, nnz, descr_M, // d_csrVal, d_csrRowPtr, d_csrColInd, info_M, policy_M, pBuffer); // status = hipsparseXcsric02_zeroPivot(handle, info_M, &numerical_zero); // //print << <1, 1 >> > (d_csrRowPtr, m + 1); // //print << <1, 1 >> > (d_csrColInd, m); // //print << <1, 1 >> > (d_csrVal, m); // if (HIPSPARSE_STATUS_ZERO_PIVOT == status) { // printf("L(%d,%d) is zero\n", numerical_zero, numerical_zero); // } // // // step 6: solve L*z = x // hipsparseDcsrsv2_solve(handle, trans_L, m, nnz, &alpha, descr_L, // d_csrVal, d_csrRowPtr, d_csrColInd, info_L, // d_x, d_z, policy_L, pBuffer); // // // step 7: solve L'*y = z // hipsparseDcsrsv2_solve(handle, trans_Lt, m, nnz, &alpha, descr_L, // d_csrVal, d_csrRowPtr, d_csrColInd, info_Lt, // d_z, d_y, policy_Lt, pBuffer); // hipDeviceSynchronize(); // // QueryPerformanceCounter(&t2); // printf("Use Time:%f\n", (t2.QuadPart - t1.QuadPart) * 1.0 / tc.QuadPart); // //print << <1, 1 >> > (d_y, m); // y = new double[m]; // hipMemcpy(y, d_y, m * sizeof(double), hipMemcpyDeviceToHost); // double error=errorCheck(y, m); // std::cout << error << std::endl; // // step 6: free resources // hipFree(pBuffer); // hipsparseDestroyMatDescr(descr_M); // hipsparseDestroyMatDescr(descr_L); // hipsparseDestroyCsric02Info(info_M); // hipsparseDestroyCsrsv2Info(info_L); // hipsparseDestroyCsrsv2Info(info_Lt); // hipsparseDestroy(handle); //}
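main() above prints a "Linear system solvers comparison"; as an illustrative CPU baseline for the same A and B (an assumption, not part of the original benchmark), Eigen's direct sparse Cholesky could be used:

// Sketch: CPU baseline with Eigen's SimplicialLLT, using the same error metric as main().
#include <Eigen/Core>
#include <Eigen/Sparse>
#include <Eigen/SparseCholesky>
#include <iostream>

double eigen_llt_baseline(const Eigen::SparseMatrix<double>& A,
                          const Eigen::VectorXd& B) {
  Eigen::SimplicialLLT<Eigen::SparseMatrix<double>> llt;
  llt.compute(A);                        // factor A = L * L^T on the CPU
  if (llt.info() != Eigen::Success) {
    std::cerr << "CPU Cholesky factorization failed" << std::endl;
    return -1.0;
  }
  Eigen::VectorXd X = llt.solve(B);
  return (A * X - B).norm() / B.norm();  // relative error |Ax - b| / |b|
}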
eea09318c0bcb664d6e88f7c4a8cddedb326258a.cu
#include <iostream> #include "cuda_runtime.h" #include "device_functions.h" #include "device_launch_parameters.h" #include <fstream> #include <assert.h> #include <cmath> #include <vector> #include "cublas_v2.h" #include <thrust/sort.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <utility> #include <cusparse.h> #include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include "cusparse.h" #include "cusolver_common.h" #include "cusolverDn.h" #include "math_functions.h" #include <algorithm> #include <Eigen/Cholesky> #include <Eigen/Core> #include <Eigen/LU> #include <Eigen/Sparse> #include <Eigen/SparseCholesky> #include <windows.h> #include "CudaIncompleteCholesky.h" //#define CLEANUP(s) \ //do { \ // printf ("%s\n", s); \ // if (yHostPtr) free(yHostPtr); \ // if (zHostPtr) free(zHostPtr); \ // if (xIndHostPtr) free(xIndHostPtr); \ // if (xValHostPtr) free(xValHostPtr); \ // if (cooRowIndexHostPtr) free(cooRowIndexHostPtr);\ // if (cooColIndexHostPtr) free(cooColIndexHostPtr);\ // if (cooValHostPtr) free(cooValHostPtr); \ // if (y) cudaFree(y); \ // if (z) cudaFree(z); \ // if (xInd) cudaFree(xInd); \ // if (xVal) cudaFree(xVal); \ // if (csrRowPtr) cudaFree(csrRowPtr); \ // if (cooRowIndex) cudaFree(cooRowIndex); \ // if (cooColIndex) cudaFree(cooColIndex); \ // if (cooVal) cudaFree(cooVal); \ // if (descr) cusparseDestroyMatDescr(descr);\ // if (handle) cusparseDestroy(handle); \ // cudaDeviceReset(); \ // fflush (stdout); \ //} while (0) //cudaError_t cudaStat1, cudaStat2, cudaStat3, cudaStat4, cudaStat5, cudaStat6; //cusparseStatus_t status; //cusparseHandle_t handle = 0; //int linearSolverCHOL( // cusolverDnHandle_t handle, // int n, // const double* Acopy, // int lda, // const double* b, // double* x) //{ // int bufferSize = 0; // int* info = NULL; // double* buffer = NULL; // double* A = NULL; // int h_info = 0; // double start, stop; // double time_solve; // cublasFillMode_t uplo = CUBLAS_FILL_MODE_LOWER; // // cusolverDnDpotrf_bufferSize(handle, uplo, n, (double*)Acopy, lda, &bufferSize); // // cudaMalloc(&info, sizeof(int))); // cudaMalloc(&buffer, sizeof(double) * bufferSize)); // cudaMalloc(&A, sizeof(double) * lda * n); // // // // prepare a copy of A because potrf will overwrite A with L // cudaMemcpy(A, Acopy, sizeof(double) * lda * n, cudaMemcpyDeviceToDevice); // cudaMemset(info, 0, sizeof(int)); // // start = second(); // start = second(); // // cusolverDnDpotrf(handle, uplo, n, A, lda, buffer, bufferSize, info); // // cudaMemcpy(&h_info, info, sizeof(int), cudaMemcpyDeviceToHost); // // if (0 != h_info) { // fprintf(stderr, "Error: Cholesky factorization failed\n"); // } // // cudaMemcpy(x, b, sizeof(double) * n, cudaMemcpyDeviceToDevice); // // cusolverDnDpotrs(handle, uplo, n, 1, A, lda, x, n, info); // // cudaDeviceSynchronize(); // stop = second(); // // time_solve = stop - start; // fprintf(stdout, "timing: cholesky = %10.6f sec\n", time_solve); // // if (info) { cudaFree(info); } // if (buffer) { cudaFree(buffer); } // if (A) { cudaFree(A); } // // return 0; //} //__global__ void print(int * pos, int size) { // for (int i = 0; i < size; i++) // printf("pos %d %d\n", i, pos[i]); //} //__global__ void print(double* pos, int size) { // for (int i = 0; i < size; i++) // printf("pos %d %f\n", i, pos[i]); //} using namespace std; using namespace Eigen; string fileName; int main() { int n, nnz; cin >> fileName; ifstream ifs(fileName); //getchar(); ifs >> n >> nnz; vector<Triplet<double>> Q; int a, b; double z; for (int i = 0; i < nnz; i++) { 
ifs >> a >> b >> z; // cout << a << b << z << endl; Q.push_back(Triplet<double>(a, b, z)); // cout << Q[i] << endl; } SparseMatrix<double> A(n, n); A.setFromTriplets(Q.begin(), Q.end()); VectorXd B(n), X(n); CudaIncompleteCholesky::CudaIncompleteCholesky<double> solver; for (int i = 0; i < n; i++) { ifs >> B(i); } LARGE_INTEGER t1, t2, tc; QueryPerformanceFrequency(&tc); QueryPerformanceCounter(&t1); solver.compute(A); X = solver.solve(B); QueryPerformanceCounter(&t2); printf("Use Time:%f\n", (t2.QuadPart - t1.QuadPart) * 1.0 / tc.QuadPart); std::cout << "Linear system solvers comparison " << std::endl; std::cout << " Relative error |Ax - b| / |b| " << std::endl; double relative_error_llt = (A * X - B).norm() / B.norm(); cout << relative_error_llt << endl; } //Eigen::SparseMatrix<double> A; //Eigen::SparseVector<double> B; //void eigenOpen(int* &csrRowPtr, int* &csrColInd, double* &csrVal, double* &x, int& n, int& nnz){ // using namespace std; // using namespace Eigen; // string fileName; // cin >> fileName; // ifstream ifs(fileName); // //cout << ifs.is_open() << endl; // //getchar(); // ifs >> n >> nnz; // vector<Triplet<double>> Q; // int a, b; // double z; // x = new double[n]; // for (int i = 0; i < nnz; i++) { // ifs >> a >> b >> z; // Q.push_back(Triplet<double>(a, b, z)); // } // A = SparseMatrix<double>(n, n); // B = SparseVector<double>(n); // A.setFromTriplets(Q.begin(), Q.end()); // csrRowPtr = new int[n + 1]; // csrColInd = new int[nnz]; // csrVal = new double[nnz]; // cudaMemcpy(csrVal, A.valuePtr(), sizeof(double) * nnz, cudaMemcpyHostToHost); // cudaMemcpy(csrRowPtr, A.outerIndexPtr(), sizeof(int) * (n + 1), cudaMemcpyHostToHost); // cudaMemcpy(csrColInd, A.innerIndexPtr(), sizeof(int) * nnz, cudaMemcpyHostToHost); // for (int i = 0; i < n; i++) { // ifs >> x[i]; // B.insert(i) = x[i]; // } // //cout << B << endl; // //getchar(); //} // //double errorCheck(double* y, int size) { // using namespace Eigen; // SparseVector<double> X(size); // for (int i = 0; i < size; i++) { // X.insert(i) = y[i]; // } // double error = (A * X - B).norm() / B.norm(); // return error; //} // //int main() { // cusparseHandle_t handle; // cusparseStatus_t status = cusparseCreate(&handle); // int *csrRowPtr,*csrColInd, *d_csrRowPtr, *d_csrColInd; // int m = 3, nnz = 3; // double* d_csrVal, * csrVal, *x, *y, *z, *d_x, *d_y, *d_z; // // //csrRowPtr = new int[m+1]; // //csrColInd = new int[nnz]; // //csrVal = new double[nnz]; // //x = new double[m]; // //for (int i = 0; i < m; i++) { // // csrVal[i] = 2; // // csrRowPtr[i] = i; // // csrColInd[i] = i; // // x[i] = 2*(i+1); // //} // //csrRowPtr[m] = nnz; // eigenOpen(csrRowPtr, csrColInd, csrVal, x, m, nnz); // //printf("%d %d\n", csrRowPtr[0], csrRowPtr[1]); // cudaMalloc((void**)& d_csrRowPtr, sizeof(int)*(m+1)); // cudaMalloc((void**)& d_csrColInd, sizeof(int)* nnz); // cudaMalloc((void**)& d_csrVal, sizeof(double)*nnz); // cudaMalloc((void**)& d_x, sizeof(double) * m); // cudaMalloc((void**)& d_y, sizeof(double) * m); // cudaMalloc((void**)& d_z, sizeof(double) * m); // cudaMemcpy(d_csrRowPtr, csrRowPtr, sizeof(int) * (m + 1), cudaMemcpyHostToDevice); // // cudaMemcpy(d_csrColInd, csrColInd, sizeof(int) * nnz, cudaMemcpyHostToDevice); // cudaMemcpy(d_csrVal, csrVal, sizeof(double) * nnz, cudaMemcpyHostToDevice); // cudaMemcpy(d_x, x, sizeof(double) * m, cudaMemcpyHostToDevice); // //print << <1, 1 >> > (d_csrRowPtr, m + 1); // //print << <1, 1 >> > (d_csrVal, nnz); // //print << <1, 1 >> > (d_csrColInd, nnz); // // Suppose that A is m x m sparse 
matrix represented by CSR format, // // Assumption: // // - handle is already created by cusparseCreate(), // // - (d_csrRowPtr, d_csrColInd, d_csrVal) is CSR of A on device memory, // // - d_x is right hand side vector on device memory, // // - d_y is solution vector on device memory. // // - d_z is intermediate result on device memory. // // cusparseMatDescr_t descr_M = 0; // cusparseMatDescr_t descr_L = 0; // csric02Info_t info_M = 0; // csrsv2Info_t info_L = 0; // csrsv2Info_t info_Lt = 0; // int pBufferSize_M; // int pBufferSize_L; // int pBufferSize_Lt; // int pBufferSize; // void* pBuffer = 0; // int structural_zero; // int numerical_zero; // const double alpha = 1.; // const cusparseSolvePolicy_t policy_M = CUSPARSE_SOLVE_POLICY_NO_LEVEL; // const cusparseSolvePolicy_t policy_L = CUSPARSE_SOLVE_POLICY_NO_LEVEL; // const cusparseSolvePolicy_t policy_Lt = CUSPARSE_SOLVE_POLICY_USE_LEVEL; // const cusparseOperation_t trans_L = CUSPARSE_OPERATION_NON_TRANSPOSE; // const cusparseOperation_t trans_Lt = CUSPARSE_OPERATION_TRANSPOSE; // // // step 1: create a descriptor which contains // // - matrix M is base-1 // // - matrix L is base-1 // // - matrix L is lower triangular // // - matrix L has non-unit diagonal // LARGE_INTEGER t1, t2, tc; // QueryPerformanceFrequency(&tc); // QueryPerformanceCounter(&t1); // // cusparseCreateMatDescr(&descr_M); // cusparseSetMatIndexBase(descr_M, CUSPARSE_INDEX_BASE_ZERO); // cusparseSetMatType(descr_M, CUSPARSE_MATRIX_TYPE_GENERAL); // // cusparseCreateMatDescr(&descr_L); // cusparseSetMatIndexBase(descr_L, CUSPARSE_INDEX_BASE_ZERO); // cusparseSetMatType(descr_L, CUSPARSE_MATRIX_TYPE_GENERAL); // cusparseSetMatFillMode(descr_L, CUSPARSE_FILL_MODE_LOWER); // cusparseSetMatDiagType(descr_L, CUSPARSE_DIAG_TYPE_NON_UNIT); // // // step 2: create a empty info structure // // we need one info for csric02 and two info's for csrsv2 // cusparseCreateCsric02Info(&info_M); // cusparseCreateCsrsv2Info(&info_L); // cusparseCreateCsrsv2Info(&info_Lt); // // // step 3: query how much memory used in csric02 and csrsv2, and allocate the buffer // cusparseDcsric02_bufferSize(handle, m, nnz, // descr_M, d_csrVal, d_csrRowPtr, d_csrColInd, info_M, &pBufferSize_M); // cusparseDcsrsv2_bufferSize(handle, trans_L, m, nnz, // descr_L, d_csrVal, d_csrRowPtr, d_csrColInd, info_L, &pBufferSize_L); // cusparseDcsrsv2_bufferSize(handle, trans_Lt, m, nnz, // descr_L, d_csrVal, d_csrRowPtr, d_csrColInd, info_Lt, &pBufferSize_Lt); // // pBufferSize = max(pBufferSize_M, max(pBufferSize_L, pBufferSize_Lt)); // // // pBuffer returned by cudaMalloc is automatically aligned to 128 bytes. // cudaMalloc((void**)& pBuffer, pBufferSize); // // // step 4: perform analysis of incomplete Cholesky on M // // perform analysis of triangular solve on L // // perform analysis of triangular solve on L' // // The lower triangular part of M has the same sparsity pattern as L, so // // we can do analysis of csric02 and csrsv2 simultaneously. 
// // cusparseDcsric02_analysis(handle, m, nnz, descr_M, // d_csrVal, d_csrRowPtr, d_csrColInd, info_M, // policy_M, pBuffer); // status = cusparseXcsric02_zeroPivot(handle, info_M, &structural_zero); // if (CUSPARSE_STATUS_ZERO_PIVOT == status) { // printf("A(%d,%d) is missing\n", structural_zero, structural_zero); // } // // cusparseDcsrsv2_analysis(handle, trans_L, m, nnz, descr_L, // d_csrVal, d_csrRowPtr, d_csrColInd, // info_L, policy_L, pBuffer); // // cusparseDcsrsv2_analysis(handle, trans_Lt, m, nnz, descr_L, // d_csrVal, d_csrRowPtr, d_csrColInd, // info_Lt, policy_Lt, pBuffer); // // // step 5: M = L * L' // cusparseDcsric02(handle, m, nnz, descr_M, // d_csrVal, d_csrRowPtr, d_csrColInd, info_M, policy_M, pBuffer); // status = cusparseXcsric02_zeroPivot(handle, info_M, &numerical_zero); // //print << <1, 1 >> > (d_csrRowPtr, m + 1); // //print << <1, 1 >> > (d_csrColInd, m); // //print << <1, 1 >> > (d_csrVal, m); // if (CUSPARSE_STATUS_ZERO_PIVOT == status) { // printf("L(%d,%d) is zero\n", numerical_zero, numerical_zero); // } // // // step 6: solve L*z = x // cusparseDcsrsv2_solve(handle, trans_L, m, nnz, &alpha, descr_L, // d_csrVal, d_csrRowPtr, d_csrColInd, info_L, // d_x, d_z, policy_L, pBuffer); // // // step 7: solve L'*y = z // cusparseDcsrsv2_solve(handle, trans_Lt, m, nnz, &alpha, descr_L, // d_csrVal, d_csrRowPtr, d_csrColInd, info_Lt, // d_z, d_y, policy_Lt, pBuffer); // cudaThreadSynchronize(); // // QueryPerformanceCounter(&t2); // printf("Use Time:%f\n", (t2.QuadPart - t1.QuadPart) * 1.0 / tc.QuadPart); // //print << <1, 1 >> > (d_y, m); // y = new double[m]; // cudaMemcpy(y, d_y, m * sizeof(double), cudaMemcpyDeviceToHost); // double error=errorCheck(y, m); // std::cout << error << std::endl; // // step 6: free resources // cudaFree(pBuffer); // cusparseDestroyMatDescr(descr_M); // cusparseDestroyMatDescr(descr_L); // cusparseDestroyCsric02Info(info_M); // cusparseDestroyCsrsv2Info(info_L); // cusparseDestroyCsrsv2Info(info_Lt); // cusparseDestroy(handle); //}
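The live driver above times CudaIncompleteCholesky::compute()/solve() with the Windows-only QueryPerformanceCounter and scores the result by the relative residual |Ax - b| / |b|. A portable sketch of the same measurement, using std::chrono for timing and Eigen's built-in conjugate gradient with an IC(0) preconditioner purely as a stand-in for the CUDA solver (the stand-in solver and the name timedSolve are assumptions of this sketch, not part of the code above):

#include <chrono>
#include <iostream>
#include <Eigen/Dense>
#include <Eigen/Sparse>

// Times compute()+solve() and returns the relative residual |Ax - b| / |b|.
double timedSolve(const Eigen::SparseMatrix<double>& A,
                  const Eigen::VectorXd& b,
                  Eigen::VectorXd& x)
{
    // Stand-in for CudaIncompleteCholesky: CG preconditioned with IC(0).
    Eigen::ConjugateGradient<Eigen::SparseMatrix<double>,
                             Eigen::Lower | Eigen::Upper,
                             Eigen::IncompleteCholesky<double>> solver;

    const auto t0 = std::chrono::steady_clock::now();
    solver.compute(A);
    x = solver.solve(b);
    const auto t1 = std::chrono::steady_clock::now();

    std::cout << "Use Time: "
              << std::chrono::duration<double>(t1 - t0).count() << " s\n";

    return (A * x - b).norm() / b.norm();   // same metric as relative_error_llt
}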
583e9b2095b99161aa76f5372c434acfdd96ee33.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <time.h> #define TIMER_CREATE(t) \ hipEvent_t t##_start, t##_end; \ hipEventCreate(&t##_start); \ hipEventCreate(&t##_end); #define TIMER_START(t) \ hipEventRecord(t##_start); \ hipEventSynchronize(t##_start); \ #define TIMER_END(t) \ hipEventRecord(t##_end); \ hipEventSynchronize(t##_end); \ hipEventElapsedTime(&t, t##_start, t##_end); \ hipEventDestroy(t##_start); \ hipEventDestroy(t##_end); #define TILE_SIZE 16 #define BLOCK_SIZE_X 1024 #define BLOCK_SIZE_Y 1 #define BLOCK_SIZE_X2 1024 #define BLOCK_SIZE_Y2 1 #define CUDA_TIMING unsigned char *input_gpu; unsigned char *output_gpu; unsigned int *hist; unsigned char *lut; double CLOCK() { struct timespec t; clock_gettime(CLOCK_MONOTONIC, &t); return (t.tv_sec * 1000)+(t.tv_nsec*1e-6); } /*******************************************************/ /* Cuda Error Function */ /*******************************************************/ inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); exit(-1); } #endif return result; } // Add GPU kernel and functions __global__ void kernel(unsigned char *input, unsigned int imgSize, unsigned char *output){ int x = blockIdx.x*blockDim.x+threadIdx.x; int y = blockIdx.y*blockDim.y+threadIdx.y; int location = y*blockDim.x*gridDim.x+x; if (location<imgSize) output[location] = x%255; } __global__ void genHist(unsigned int *input, unsigned int width, unsigned int height, unsigned int *hist) { int x = blockIdx.x*blockDim.x + threadIdx.x; __shared__ unsigned int tempHist[256]; tempHist[threadIdx.x]=0; __syncthreads(); const unsigned int temp=input[x]; //Calculate Histogram atomicAdd(&tempHist[(temp & 0x000000FF)], 1); atomicAdd(&tempHist[(temp & 0x0000FF00) >> 8], 1); atomicAdd(&tempHist[(temp & 0x00FF0000) >> 16], 1); atomicAdd(&tempHist[(temp & 0xFF000000) >> 24], 1); __syncthreads(); atomicAdd(&(hist[threadIdx.x]),tempHist[threadIdx.x]); } __global__ void genHist2(unsigned char *input, int numPixel, unsigned int *hist){ int x = blockIdx.x*blockDim.x+threadIdx.x; //Generate new gray value if (x<numPixel){ atomicAdd(&hist[input[x]], 1); } } __global__ void genLUT( unsigned int *hist, float imgSize, unsigned char *lut){ int x = blockIdx.x*blockDim.x+threadIdx.x; int y = blockIdx.y*blockDim.y+threadIdx.y; int location = y*blockDim.x*gridDim.x+x; __shared__ unsigned int cdfHist[256]; __shared__ unsigned int tempHist[256]; __shared__ int mincdf; tempHist[location]=hist[location]; __syncthreads(); //Accumulate unsigned int cdfTemp=0; int i = location; do { cdfTemp += tempHist[i--]; } while (i >= 0); cdfHist[location]=cdfTemp; __syncthreads(); //Find minimum CDF if (threadIdx.x==0&&threadIdx.y==0) { int j=0; while (j<256 && cdfHist[j]==0) { ++j; } mincdf=j; } __syncthreads(); //Generate look-up table float lutf=0; if (location>mincdf) { lutf=255.0*(cdfHist[location]-cdfHist[mincdf])/(imgSize-cdfHist[mincdf]); } //Write look-up table lut[location]=(unsigned char)roundf(lutf); } __global__ void applyLUT(unsigned int *input, unsigned int width, unsigned char *lut, unsigned int *output){ int x = blockIdx.x*blockDim.x + threadIdx.x; __shared__ unsigned char lutTemp[256]; lutTemp[threadIdx.x]=lut[threadIdx.x]; __syncthreads(); unsigned int temp=input[x]; unsigned char temp1=lutTemp[(temp & 0xFF000000) >> 24]; unsigned char temp2=lutTemp[(temp & 0x00FF0000) >> 16]; 
unsigned char temp3=lutTemp[(temp & 0x0000FF00) >> 8]; unsigned char temp4=lutTemp[(temp & 0x000000FF)]; temp=(((unsigned int)temp1) << 24)+(((unsigned int)temp2) << 16)+(((unsigned int)temp3) << 8)+((unsigned int)temp4); output[x]=temp; } __global__ void applyLUT2(unsigned char *input, int numPixel, unsigned char *lut, unsigned char *output){ int x = blockIdx.x*blockDim.x+threadIdx.x; //Generate new gray value if (x<numPixel){ output[x]=lut[input[x]]; } } void histogram_gpu(unsigned char *data, unsigned int height, unsigned int width){ int gridXSize = width*height / BLOCK_SIZE_X; int gridYSize = 1; int gridXSize2 = width*height / BLOCK_SIZE_X2; int gridYSize2 = 1; int restPixel = width*height % BLOCK_SIZE_X2; int lutOffset = gridXSize2 * BLOCK_SIZE_X2; // Both are the same size (CPU/GPU). unsigned int size = height*width; // Allocate arrays in GPU memory checkCuda(hipMalloc((void**)&input_gpu , size*sizeof(unsigned char))); checkCuda(hipMalloc((void**)&output_gpu , size*sizeof(unsigned char))); checkCuda(hipMalloc((void**)&hist , 256*sizeof(unsigned int))); checkCuda(hipMalloc((void**)&lut , 256*sizeof(unsigned char))); checkCuda(hipMemset(hist , 0 , 256*sizeof(unsigned int))); checkCuda(hipMemset(lut , 0 , 256*sizeof(unsigned char))); checkCuda(hipMemset(output_gpu , 0 , size*sizeof(unsigned char))); // Copy data to GPU checkCuda(hipMemcpy(input_gpu, data, size*sizeof(char), hipMemcpyHostToDevice)); checkCuda(hipDeviceSynchronize()); // Execute algorithm dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(BLOCK_SIZE_X/4, BLOCK_SIZE_Y); dim3 dimGridforLUT(1, 1); dim3 dimBlockforLUT(16, 16); dim3 dimGrid2(gridXSize2, gridYSize2); dim3 dimBlock2(BLOCK_SIZE_X2/4, BLOCK_SIZE_Y2); // Kernel Call #if defined(CUDA_TIMING) float Ktime; TIMER_CREATE(Ktime); TIMER_START(Ktime); #endif hipLaunchKernelGGL(( genHist), dim3(dimGrid), dim3(dimBlock), 0, 0, (unsigned int*)input_gpu, width, height, hist); if (restPixel != 0){ int gridXSize3 = (restPixel-1) / (BLOCK_SIZE_X2/4) + 1; int gridYSize3 = 1; dim3 dimGrid3(gridXSize3, gridYSize3); hipLaunchKernelGGL(( genHist2), dim3(dimGrid3), dim3(dimBlock), 0, 0, input_gpu+lutOffset, restPixel, hist); } hipLaunchKernelGGL(( genLUT), dim3(dimGridforLUT), dim3(dimBlockforLUT), 0, 0, hist, size, lut); hipLaunchKernelGGL(( applyLUT), dim3(dimGrid2), dim3(dimBlock2), 0, 0, (unsigned int*)input_gpu, width, lut, (unsigned int*)output_gpu); if (restPixel != 0){ int gridXSize3 = (restPixel-1) / (BLOCK_SIZE_X2/4) + 1; int gridYSize3 = 1; dim3 dimGrid3(gridXSize3, gridYSize3); hipLaunchKernelGGL(( applyLUT2), dim3(dimGrid3), dim3(dimBlock2), 0, 0, input_gpu+lutOffset, restPixel, lut, output_gpu+lutOffset); } checkCuda(hipDeviceSynchronize()); #if defined(CUDA_TIMING) TIMER_END(Ktime); printf("Kernel Execution Time: %f ms\n", Ktime); #endif // Retrieve results from the GPU checkCuda(hipMemcpy(data, output_gpu, size*sizeof(unsigned char), hipMemcpyDeviceToHost)); // Free resources and end the program checkCuda(hipFree(output_gpu)); checkCuda(hipFree(input_gpu)); checkCuda(hipFree(hist)); checkCuda(hipFree(lut)); } void histogram_gpu_warmup(unsigned char *data, unsigned int height, unsigned int width){ int gridXSize = 1 + (( width - 1) / TILE_SIZE); int gridYSize = 1 + ((height - 1) / TILE_SIZE); // Both are the same size (CPU/GPU). 
unsigned int size = height*width; // Allocate arrays in GPU memory checkCuda(hipMalloc((void**)&input_gpu , size*sizeof(unsigned char))); checkCuda(hipMalloc((void**)&output_gpu , size*sizeof(unsigned char))); checkCuda(hipMemset(output_gpu , 0 , size*sizeof(unsigned char))); // Copy data to GPU checkCuda(hipMemcpy(input_gpu, data, size*sizeof(char), hipMemcpyHostToDevice)); checkCuda(hipDeviceSynchronize()); // Execute algorithm dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(TILE_SIZE, TILE_SIZE); hipLaunchKernelGGL(( kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, input_gpu, size, output_gpu); checkCuda(hipDeviceSynchronize()); // Retrieve results from the GPU checkCuda(hipMemcpy(data, output_gpu, size*sizeof(unsigned char), hipMemcpyDeviceToHost)); // Free resources and end the program checkCuda(hipFree(output_gpu)); checkCuda(hipFree(input_gpu)); }
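The genLUT kernel in the listing above derives the equalization look-up table from the cumulative histogram: gray levels at or below the first non-zero CDF bin map to 0, everything else is rescaled into [0, 255]. A host-side reference of the same computation, useful for validating the GPU LUT (a sketch; buildLutHost is a name local to this snippet):

#include <math.h>

void buildLutHost(const unsigned int hist[256], unsigned int imgSize,
                  unsigned char lut[256])
{
    // Inclusive prefix sum of the 256-bin histogram (the CDF).
    unsigned int cdf[256];
    unsigned int running = 0;
    for (int i = 0; i < 256; ++i) {
        running += hist[i];
        cdf[i] = running;
    }

    // First gray level with a non-zero CDF, mirroring the mincdf search in genLUT.
    int mincdf = 0;
    while (mincdf < 256 && cdf[mincdf] == 0)
        ++mincdf;

    // Rescale, matching the (location > mincdf) branch of the kernel.
    for (int i = 0; i < 256; ++i) {
        float v = 0.0f;
        if (i > mincdf)
            v = 255.0f * (float)(cdf[i] - cdf[mincdf]) / (float)(imgSize - cdf[mincdf]);
        lut[i] = (unsigned char)roundf(v);
    }
}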
583e9b2095b99161aa76f5372c434acfdd96ee33.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <time.h> #define TIMER_CREATE(t) \ cudaEvent_t t##_start, t##_end; \ cudaEventCreate(&t##_start); \ cudaEventCreate(&t##_end); #define TIMER_START(t) \ cudaEventRecord(t##_start); \ cudaEventSynchronize(t##_start); \ #define TIMER_END(t) \ cudaEventRecord(t##_end); \ cudaEventSynchronize(t##_end); \ cudaEventElapsedTime(&t, t##_start, t##_end); \ cudaEventDestroy(t##_start); \ cudaEventDestroy(t##_end); #define TILE_SIZE 16 #define BLOCK_SIZE_X 1024 #define BLOCK_SIZE_Y 1 #define BLOCK_SIZE_X2 1024 #define BLOCK_SIZE_Y2 1 #define CUDA_TIMING unsigned char *input_gpu; unsigned char *output_gpu; unsigned int *hist; unsigned char *lut; double CLOCK() { struct timespec t; clock_gettime(CLOCK_MONOTONIC, &t); return (t.tv_sec * 1000)+(t.tv_nsec*1e-6); } /*******************************************************/ /* Cuda Error Function */ /*******************************************************/ inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); exit(-1); } #endif return result; } // Add GPU kernel and functions __global__ void kernel(unsigned char *input, unsigned int imgSize, unsigned char *output){ int x = blockIdx.x*blockDim.x+threadIdx.x; int y = blockIdx.y*blockDim.y+threadIdx.y; int location = y*blockDim.x*gridDim.x+x; if (location<imgSize) output[location] = x%255; } __global__ void genHist(unsigned int *input, unsigned int width, unsigned int height, unsigned int *hist) { int x = blockIdx.x*blockDim.x + threadIdx.x; __shared__ unsigned int tempHist[256]; tempHist[threadIdx.x]=0; __syncthreads(); const unsigned int temp=input[x]; //Calculate Histogram atomicAdd(&tempHist[(temp & 0x000000FF)], 1); atomicAdd(&tempHist[(temp & 0x0000FF00) >> 8], 1); atomicAdd(&tempHist[(temp & 0x00FF0000) >> 16], 1); atomicAdd(&tempHist[(temp & 0xFF000000) >> 24], 1); __syncthreads(); atomicAdd(&(hist[threadIdx.x]),tempHist[threadIdx.x]); } __global__ void genHist2(unsigned char *input, int numPixel, unsigned int *hist){ int x = blockIdx.x*blockDim.x+threadIdx.x; //Generate new gray value if (x<numPixel){ atomicAdd(&hist[input[x]], 1); } } __global__ void genLUT( unsigned int *hist, float imgSize, unsigned char *lut){ int x = blockIdx.x*blockDim.x+threadIdx.x; int y = blockIdx.y*blockDim.y+threadIdx.y; int location = y*blockDim.x*gridDim.x+x; __shared__ unsigned int cdfHist[256]; __shared__ unsigned int tempHist[256]; __shared__ int mincdf; tempHist[location]=hist[location]; __syncthreads(); //Accumulate unsigned int cdfTemp=0; int i = location; do { cdfTemp += tempHist[i--]; } while (i >= 0); cdfHist[location]=cdfTemp; __syncthreads(); //Find minimum CDF if (threadIdx.x==0&&threadIdx.y==0) { int j=0; while (j<256 && cdfHist[j]==0) { ++j; } mincdf=j; } __syncthreads(); //Generate look-up table float lutf=0; if (location>mincdf) { lutf=255.0*(cdfHist[location]-cdfHist[mincdf])/(imgSize-cdfHist[mincdf]); } //Write look-up table lut[location]=(unsigned char)roundf(lutf); } __global__ void applyLUT(unsigned int *input, unsigned int width, unsigned char *lut, unsigned int *output){ int x = blockIdx.x*blockDim.x + threadIdx.x; __shared__ unsigned char lutTemp[256]; lutTemp[threadIdx.x]=lut[threadIdx.x]; __syncthreads(); unsigned int temp=input[x]; unsigned char temp1=lutTemp[(temp & 0xFF000000) >> 24]; unsigned char temp2=lutTemp[(temp & 0x00FF0000) >> 16]; unsigned char temp3=lutTemp[(temp & 0x0000FF00) >> 8]; 
unsigned char temp4=lutTemp[(temp & 0x000000FF)]; temp=(((unsigned int)temp1) << 24)+(((unsigned int)temp2) << 16)+(((unsigned int)temp3) << 8)+((unsigned int)temp4); output[x]=temp; } __global__ void applyLUT2(unsigned char *input, int numPixel, unsigned char *lut, unsigned char *output){ int x = blockIdx.x*blockDim.x+threadIdx.x; //Generate new gray value if (x<numPixel){ output[x]=lut[input[x]]; } } void histogram_gpu(unsigned char *data, unsigned int height, unsigned int width){ int gridXSize = width*height / BLOCK_SIZE_X; int gridYSize = 1; int gridXSize2 = width*height / BLOCK_SIZE_X2; int gridYSize2 = 1; int restPixel = width*height % BLOCK_SIZE_X2; int lutOffset = gridXSize2 * BLOCK_SIZE_X2; // Both are the same size (CPU/GPU). unsigned int size = height*width; // Allocate arrays in GPU memory checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char))); checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char))); checkCuda(cudaMalloc((void**)&hist , 256*sizeof(unsigned int))); checkCuda(cudaMalloc((void**)&lut , 256*sizeof(unsigned char))); checkCuda(cudaMemset(hist , 0 , 256*sizeof(unsigned int))); checkCuda(cudaMemset(lut , 0 , 256*sizeof(unsigned char))); checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char))); // Copy data to GPU checkCuda(cudaMemcpy(input_gpu, data, size*sizeof(char), cudaMemcpyHostToDevice)); checkCuda(cudaDeviceSynchronize()); // Execute algorithm dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(BLOCK_SIZE_X/4, BLOCK_SIZE_Y); dim3 dimGridforLUT(1, 1); dim3 dimBlockforLUT(16, 16); dim3 dimGrid2(gridXSize2, gridYSize2); dim3 dimBlock2(BLOCK_SIZE_X2/4, BLOCK_SIZE_Y2); // Kernel Call #if defined(CUDA_TIMING) float Ktime; TIMER_CREATE(Ktime); TIMER_START(Ktime); #endif genHist<<<dimGrid, dimBlock>>>((unsigned int*)input_gpu, width, height, hist); if (restPixel != 0){ int gridXSize3 = (restPixel-1) / (BLOCK_SIZE_X2/4) + 1; int gridYSize3 = 1; dim3 dimGrid3(gridXSize3, gridYSize3); genHist2<<<dimGrid3, dimBlock>>>(input_gpu+lutOffset, restPixel, hist); } genLUT<<<dimGridforLUT, dimBlockforLUT>>>(hist, size, lut); applyLUT<<<dimGrid2, dimBlock2>>>((unsigned int*)input_gpu, width, lut, (unsigned int*)output_gpu); if (restPixel != 0){ int gridXSize3 = (restPixel-1) / (BLOCK_SIZE_X2/4) + 1; int gridYSize3 = 1; dim3 dimGrid3(gridXSize3, gridYSize3); applyLUT2<<<dimGrid3, dimBlock2>>>(input_gpu+lutOffset, restPixel, lut, output_gpu+lutOffset); } checkCuda(cudaDeviceSynchronize()); #if defined(CUDA_TIMING) TIMER_END(Ktime); printf("Kernel Execution Time: %f ms\n", Ktime); #endif // Retrieve results from the GPU checkCuda(cudaMemcpy(data, output_gpu, size*sizeof(unsigned char), cudaMemcpyDeviceToHost)); // Free resources and end the program checkCuda(cudaFree(output_gpu)); checkCuda(cudaFree(input_gpu)); checkCuda(cudaFree(hist)); checkCuda(cudaFree(lut)); } void histogram_gpu_warmup(unsigned char *data, unsigned int height, unsigned int width){ int gridXSize = 1 + (( width - 1) / TILE_SIZE); int gridYSize = 1 + ((height - 1) / TILE_SIZE); // Both are the same size (CPU/GPU). 
unsigned int size = height*width; // Allocate arrays in GPU memory checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char))); checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char))); checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char))); // Copy data to GPU checkCuda(cudaMemcpy(input_gpu, data, size*sizeof(char), cudaMemcpyHostToDevice)); checkCuda(cudaDeviceSynchronize()); // Execute algorithm dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(TILE_SIZE, TILE_SIZE); kernel<<<dimGrid, dimBlock>>>(input_gpu, size, output_gpu); checkCuda(cudaDeviceSynchronize()); // Retrieve results from the GPU checkCuda(cudaMemcpy(data, output_gpu, size*sizeof(unsigned char), cudaMemcpyDeviceToHost)); // Free resources and end the program checkCuda(cudaFree(output_gpu)); checkCuda(cudaFree(input_gpu)); }
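The .cu listing above is the source that the .hip listing was generated from; apart from the cuda*-to-hip* renames, the visible difference is the kernel-launch syntax. The mapping for one launch, with both lines taken from the listings (in hipLaunchKernelGGL the fourth and fifth arguments are the dynamic shared-memory size and the stream, both 0 here):

// CUDA original:
genHist<<<dimGrid, dimBlock>>>((unsigned int*)input_gpu, width, height, hist);

// hipify output:
hipLaunchKernelGGL(( genHist), dim3(dimGrid), dim3(dimBlock), 0, 0,
                   (unsigned int*)input_gpu, width, height, hist);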
c603a5f82487978b34a0d2fe36d24a939a50a0d7.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
using namespace std;

__global__ void AddIntsCUDA (int *a, int *b)
{
    a[0] += b[0];
}

int main()
{
    int a = 5, b = 9;
    int *d_a, *d_b;
    hipMalloc(&d_a, sizeof(int));
    hipMalloc(&d_b, sizeof(int));
    hipMemcpy(d_a, &a, sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_b, &b, sizeof(int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL((AddIntsCUDA), dim3(1), dim3(1), 0, 0, d_a, d_b);
    hipMemcpy(&a, d_a, sizeof(int), hipMemcpyDeviceToHost);
    cout << "The answer is " << a << endl;
    return 0;
}
c603a5f82487978b34a0d2fe36d24a939a50a0d7.cu
#include <iostream>
#include <cuda.h>
using namespace std;

__global__ void AddIntsCUDA (int *a, int *b)
{
    a[0] += b[0];
}

int main()
{
    int a = 5, b = 9;
    int *d_a, *d_b;
    cudaMalloc(&d_a, sizeof(int));
    cudaMalloc(&d_b, sizeof(int));
    cudaMemcpy(d_a, &a, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &b, sizeof(int), cudaMemcpyHostToDevice);
    AddIntsCUDA<<<1, 1>>>(d_a, d_b);
    cudaMemcpy(&a, d_a, sizeof(int), cudaMemcpyDeviceToHost);
    cout << "The answer is " << a << endl;
    return 0;
}
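The AddIntsCUDA example above checks no API return codes and never frees d_a or d_b. A hardened variant of the same program, sketched in the CUDA spelling (the CHECK macro is local to this sketch; the HIP twin is identical apart from the hip* names used in the .hip listing):

#include <cstdio>
#include <cuda_runtime.h>

#define CHECK(call)                                                    \
    do {                                                               \
        cudaError_t err_ = (call);                                     \
        if (err_ != cudaSuccess) {                                     \
            std::fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,    \
                         cudaGetErrorString(err_));                    \
            return 1;                                                  \
        }                                                              \
    } while (0)

__global__ void AddIntsCUDA(int *a, const int *b) { a[0] += b[0]; }

int main()
{
    int a = 5, b = 9;
    int *d_a = nullptr, *d_b = nullptr;

    CHECK(cudaMalloc(&d_a, sizeof(int)));
    CHECK(cudaMalloc(&d_b, sizeof(int)));
    CHECK(cudaMemcpy(d_a, &a, sizeof(int), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_b, &b, sizeof(int), cudaMemcpyHostToDevice));

    AddIntsCUDA<<<1, 1>>>(d_a, d_b);
    CHECK(cudaGetLastError());            // catch launch errors
    CHECK(cudaMemcpy(&a, d_a, sizeof(int), cudaMemcpyDeviceToHost));

    CHECK(cudaFree(d_a));
    CHECK(cudaFree(d_b));
    std::printf("The answer is %d\n", a);
    return 0;
}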
05643e098ba45ae216ea0c7e251d746142047fb1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zherk_fermi_batched_k32.cu normal z -> s, Fri Jan 30 19:00:10 2015 @author Jakub Kurzak @author Stan Tomov @author Mark Gates @author Azzam Haidar [zcds]gemm_fermi.cu defines the CPU driver. [zcds]gemm_fermi_kernels.h defines the block sizes for each precision. gemm_stencil_defs.h defines types and functions for precision-independent code. These files are included multiple times, once for each transpose version. herk_stencil.cuh defines the GPU kernel (device function). herk_kernel_batched.cuh defines the GPU kernel (global function). The batched version uses herk_kernel_batched.cuh instead of herk_kernel.cuh. */ #include "common_magma.h" #include "commonblas_s.h" #define PRECISION_s /////////////////////////////////////////////////////////////////////////////////////////////////// #include "sgemm_fermi_kernels_batched_k32.h" /////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- SSYRK performs one of the symmetric rank k operations C := alpha*A*A**H + beta*C, or C := alpha*A**H*A + beta*C, where alpha and beta are real scalars, C is an n by n symmetric matrix and A is an n by k matrix in the first case and a k by n matrix in the second case. Parameters ---------- @param[in] uplo CHARACTER*1. On entry, uplo specifies whether the upper or lower triangular part of the array C is to be referenced as follows: uplo = 'U' or 'u' Only the upper triangular part of C is to be referenced. uplo = 'L' or 'l' Only the lower triangular part of C is to be referenced. @param[in] trans CHARACTER*1. On entry, trans specifies the operation to be performed as follows: trans = 'N' or 'n' C := alpha*A*A**H + beta*C. trans = 'C' or 'c' C := alpha*A**H*A + beta*C. @param[in] n INTEGER. On entry, specifies the order of the matrix C. N must be at least zero. @param[in] k INTEGER. On entry with trans = 'N' or 'n', k specifies the number of columns of the matrix A, and on entry with trans = 'C' or 'c', k specifies the number of rows of the matrix A. K must be at least zero. @param[in] alpha REAL On entry, ALPHA specifies the scalar alpha. @param[in] dA REAL array of DIMENSION ( ldda, ka ), where ka is k when trans = MagmaNoTrans, and is n otherwise. Before entry with trans = MagmaNoTrans, the leading m by k part of the array dA must contain the matrix dA, otherwise the leading k by m part of the array dA must contain the matrix dA. @param[in] ldda INTEGER. On entry, ldda specifies the first dimension of A as declared in the calling (sub) program. When trans = MagmaNoTrans then ldda must be at least max( 1, n ), otherwise ldda must be at least max( 1, k ). @param[in] beta REAL. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then dC need not be set on input. @param[in,out] dC REAL array of DIMENSION ( lddc, n ). Before entry with uplo = 'U' or 'u', the leading n by n upper triangular part of the array C must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of C is not referenced. On exit, the upper triangular part of the array C is overwritten by the upper triangular part of the updated matrix. 
Before entry with uplo = 'L' or 'l', the leading n by n lower triangular part of the array C must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of C is not referenced. On exit, the lower triangular part of the array C is overwritten by the lower triangular part of the updated matrix. Note that the imaginary parts of the diagonal elements need not be set, they are assumed to be zero, and on exit they are set to zero. @param[in] lddc INTEGER. On entry, lddc specifies the first dimension of dC as declared in the calling (sub) program. lddc must be at least max( 1, m ). @ingroup magma_sblas3 ********************************************************************/ extern "C" void magmablas_ssyrk_batched_k32( magma_uplo_t uplo, magma_trans_t trans, magma_int_t n, magma_int_t k, float alpha, float const * const * dA_array, magma_int_t ldda, float beta, float **dC_array, magma_int_t lddc, magma_int_t batchCount, magma_queue_t queue ) { float cbeta = MAGMA_S_MAKE( beta, 0. ); float calpha = MAGMA_S_MAKE( alpha, 0. ); magma_int_t info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower ) info = -1; else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) info = -2; else if ( n < 0 ) info = -3; else if ( k < 0 ) info = -4; else if ( trans == MagmaNoTrans ? ldda < n : ldda < k ) info = -7; else if ( lddc < n ) info = -10; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { printf("not supported \n"); // TODO call cublas return; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( n <= 0 || k <= 0 ) return; size_t offsetA = 0; int TransA = 0, TransB = 0, uploA = 0; if ( uplo == MagmaLower ) uploA = 1; else if ( uplo == MagmaUpper ) uploA = 2; if ( trans == MagmaNoTrans ) #if defined(PRECISION_z) || defined(PRECISION_c) TransB = 2; #else TransB = 1; #endif else if ( trans == MagmaTrans || trans == MagmaConjTrans) #if defined(PRECISION_z) || defined(PRECISION_c) TransA = 2; #else TransA = 1; #endif #ifdef TEXTURE_1D size_t sizeA = (size_t) ldda * (size_t) (!TransA ? 
k : n); size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512); if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE ) { printf("not supported \n"); // TODO call cublas return; } // Set textures parameters tex_ref_A.normalized = false; tex_ref_A.filterMode = hipFilterModePoint; tex_ref_A.addressMode[0] = hipAddressModeClamp; // Bind A and B to texture references hipError_t err; err = hipBindTexture(&offsetA, tex_ref_A, dA_array[0], sizeA*sizeof(float)); if ( err != hipSuccess ) { fprintf( stderr, "cannot bind A to texture: %s (%d)\n", hipGetErrorString(err), err ); return; } #endif // Set up grids dim3 dimBlock(DIM_X, DIM_Y); offsetA = offsetA/sizeof(float); if ( TransA == 0 && TransB == 1 ) { dim3 dimGrid( (n - 1)/BLK_M_nt + 1, (n - 1)/BLK_N_nt + 1 , batchCount ); hipLaunchKernelGGL(( magmablas_s_herk_kernel_fermi_nt_batched), dim3(dimGrid), dim3(dimBlock), 0, queue , uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta, (int)offsetA, (int)offsetA ); } else if ( TransA == 0 && TransB == 2 ) { dim3 dimGrid( (n - 1)/BLK_M_nc + 1, (n - 1)/BLK_N_nc + 1 , batchCount ); hipLaunchKernelGGL(( magmablas_s_herk_kernel_fermi_nc_batched), dim3(dimGrid), dim3(dimBlock), 0, queue , uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta, (int)offsetA, (int)offsetA ); } else if ( TransA == 1 && TransB == 0 ) { dim3 dimGrid( (n - 1)/BLK_M_tn + 1, (n - 1)/BLK_N_tn + 1 , batchCount ); hipLaunchKernelGGL(( magmablas_s_herk_kernel_fermi_tn_batched), dim3(dimGrid), dim3(dimBlock), 0, queue , uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta, (int)offsetA, (int)offsetA ); } else if ( TransA == 2 && TransB == 0 ) { dim3 dimGrid( (n - 1)/BLK_M_cn + 1, (n - 1)/BLK_N_cn + 1 , batchCount ); hipLaunchKernelGGL(( magmablas_s_herk_kernel_fermi_cn_batched), dim3(dimGrid), dim3(dimBlock), 0, queue , uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta, (int)offsetA, (int)offsetA ); } #ifdef TEXTURE_1D hipUnbindTexture( tex_ref_A ); #endif } ///////////////////////////////////////////////////////////////////////////////////////////////////
05643e098ba45ae216ea0c7e251d746142047fb1.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zherk_fermi_batched_k32.cu normal z -> s, Fri Jan 30 19:00:10 2015 @author Jakub Kurzak @author Stan Tomov @author Mark Gates @author Azzam Haidar [zcds]gemm_fermi.cu defines the CPU driver. [zcds]gemm_fermi_kernels.h defines the block sizes for each precision. gemm_stencil_defs.h defines types and functions for precision-independent code. These files are included multiple times, once for each transpose version. herk_stencil.cuh defines the GPU kernel (device function). herk_kernel_batched.cuh defines the GPU kernel (global function). The batched version uses herk_kernel_batched.cuh instead of herk_kernel.cuh. */ #include "common_magma.h" #include "commonblas_s.h" #define PRECISION_s /////////////////////////////////////////////////////////////////////////////////////////////////// #include "sgemm_fermi_kernels_batched_k32.h" /////////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- SSYRK performs one of the symmetric rank k operations C := alpha*A*A**H + beta*C, or C := alpha*A**H*A + beta*C, where alpha and beta are real scalars, C is an n by n symmetric matrix and A is an n by k matrix in the first case and a k by n matrix in the second case. Parameters ---------- @param[in] uplo CHARACTER*1. On entry, uplo specifies whether the upper or lower triangular part of the array C is to be referenced as follows: uplo = 'U' or 'u' Only the upper triangular part of C is to be referenced. uplo = 'L' or 'l' Only the lower triangular part of C is to be referenced. @param[in] trans CHARACTER*1. On entry, trans specifies the operation to be performed as follows: trans = 'N' or 'n' C := alpha*A*A**H + beta*C. trans = 'C' or 'c' C := alpha*A**H*A + beta*C. @param[in] n INTEGER. On entry, specifies the order of the matrix C. N must be at least zero. @param[in] k INTEGER. On entry with trans = 'N' or 'n', k specifies the number of columns of the matrix A, and on entry with trans = 'C' or 'c', k specifies the number of rows of the matrix A. K must be at least zero. @param[in] alpha REAL On entry, ALPHA specifies the scalar alpha. @param[in] dA REAL array of DIMENSION ( ldda, ka ), where ka is k when trans = MagmaNoTrans, and is n otherwise. Before entry with trans = MagmaNoTrans, the leading m by k part of the array dA must contain the matrix dA, otherwise the leading k by m part of the array dA must contain the matrix dA. @param[in] ldda INTEGER. On entry, ldda specifies the first dimension of A as declared in the calling (sub) program. When trans = MagmaNoTrans then ldda must be at least max( 1, n ), otherwise ldda must be at least max( 1, k ). @param[in] beta REAL. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then dC need not be set on input. @param[in,out] dC REAL array of DIMENSION ( lddc, n ). Before entry with uplo = 'U' or 'u', the leading n by n upper triangular part of the array C must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of C is not referenced. On exit, the upper triangular part of the array C is overwritten by the upper triangular part of the updated matrix. Before entry with uplo = 'L' or 'l', the leading n by n lower triangular part of the array C must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of C is not referenced. 
On exit, the lower triangular part of the array C is overwritten by the lower triangular part of the updated matrix. Note that the imaginary parts of the diagonal elements need not be set, they are assumed to be zero, and on exit they are set to zero. @param[in] lddc INTEGER. On entry, lddc specifies the first dimension of dC as declared in the calling (sub) program. lddc must be at least max( 1, m ). @ingroup magma_sblas3 ********************************************************************/ extern "C" void magmablas_ssyrk_batched_k32( magma_uplo_t uplo, magma_trans_t trans, magma_int_t n, magma_int_t k, float alpha, float const * const * dA_array, magma_int_t ldda, float beta, float **dC_array, magma_int_t lddc, magma_int_t batchCount, magma_queue_t queue ) { float cbeta = MAGMA_S_MAKE( beta, 0. ); float calpha = MAGMA_S_MAKE( alpha, 0. ); magma_int_t info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower ) info = -1; else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) info = -2; else if ( n < 0 ) info = -3; else if ( k < 0 ) info = -4; else if ( trans == MagmaNoTrans ? ldda < n : ldda < k ) info = -7; else if ( lddc < n ) info = -10; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { printf("not supported \n"); // TODO call cublas return; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( n <= 0 || k <= 0 ) return; size_t offsetA = 0; int TransA = 0, TransB = 0, uploA = 0; if ( uplo == MagmaLower ) uploA = 1; else if ( uplo == MagmaUpper ) uploA = 2; if ( trans == MagmaNoTrans ) #if defined(PRECISION_z) || defined(PRECISION_c) TransB = 2; #else TransB = 1; #endif else if ( trans == MagmaTrans || trans == MagmaConjTrans) #if defined(PRECISION_z) || defined(PRECISION_c) TransA = 2; #else TransA = 1; #endif #ifdef TEXTURE_1D size_t sizeA = (size_t) ldda * (size_t) (!TransA ? 
k : n); size_t CUBLAS_MAX_1DBUF_SIZE = ((1 << 27) - 512); if ( sizeA >= CUBLAS_MAX_1DBUF_SIZE ) { printf("not supported \n"); // TODO call cublas return; } // Set textures parameters tex_ref_A.normalized = false; tex_ref_A.filterMode = cudaFilterModePoint; tex_ref_A.addressMode[0] = cudaAddressModeClamp; // Bind A and B to texture references cudaError_t err; err = cudaBindTexture(&offsetA, tex_ref_A, dA_array[0], sizeA*sizeof(float)); if ( err != cudaSuccess ) { fprintf( stderr, "cannot bind A to texture: %s (%d)\n", cudaGetErrorString(err), err ); return; } #endif // Set up grids dim3 dimBlock(DIM_X, DIM_Y); offsetA = offsetA/sizeof(float); if ( TransA == 0 && TransB == 1 ) { dim3 dimGrid( (n - 1)/BLK_M_nt + 1, (n - 1)/BLK_N_nt + 1 , batchCount ); magmablas_s_herk_kernel_fermi_nt_batched<<< dimGrid, dimBlock, 0, queue >>>( uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta, (int)offsetA, (int)offsetA ); } else if ( TransA == 0 && TransB == 2 ) { dim3 dimGrid( (n - 1)/BLK_M_nc + 1, (n - 1)/BLK_N_nc + 1 , batchCount ); magmablas_s_herk_kernel_fermi_nc_batched<<< dimGrid, dimBlock, 0, queue >>>( uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta, (int)offsetA, (int)offsetA ); } else if ( TransA == 1 && TransB == 0 ) { dim3 dimGrid( (n - 1)/BLK_M_tn + 1, (n - 1)/BLK_N_tn + 1 , batchCount ); magmablas_s_herk_kernel_fermi_tn_batched<<< dimGrid, dimBlock, 0, queue >>>( uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta, (int)offsetA, (int)offsetA ); } else if ( TransA == 2 && TransB == 0 ) { dim3 dimGrid( (n - 1)/BLK_M_cn + 1, (n - 1)/BLK_N_cn + 1 , batchCount ); magmablas_s_herk_kernel_fermi_cn_batched<<< dimGrid, dimBlock, 0, queue >>>( uploA, n, k, dA_array, ldda, dA_array, ldda, dC_array, lddc, calpha, cbeta, (int)offsetA, (int)offsetA ); } #ifdef TEXTURE_1D cudaUnbindTexture( tex_ref_A ); #endif } ///////////////////////////////////////////////////////////////////////////////////////////////////
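Every branch of magmablas_ssyrk_batched_k32 above builds its launch grid with the same ceiling-division idiom and carries the batch index in gridDim.z. That pattern factored out (a sketch; ceil_div and make_herk_grid are illustrative names, and BLK_M/BLK_N stand in for the per-case tile sizes such as BLK_M_nt):

#include <cuda_runtime.h>   // dim3

static inline int ceil_div(int a, int b)
{
    return (a - 1) / b + 1;   // ceil(a / b) for a > 0, as written in the listing
}

static dim3 make_herk_grid(int n, int BLK_M, int BLK_N, int batchCount)
{
    // x/y tile the n-by-n output matrix, z enumerates the batch.
    return dim3(ceil_div(n, BLK_M), ceil_div(n, BLK_N), batchCount);
}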
bdc9bddffb7203107ff8ecad6b62b28286374ba6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #include <optix.h> #include "optixMultiGPU.h" #include <sutil/vec_math.h> #include <cuda/helpers.h> #include <cuda/random.h> extern "C" { __constant__ Params params; } //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ struct RadiancePRD { // TODO: move some state directly into payload registers? 
float3 emitted; float3 radiance; float3 attenuation; float3 origin; float3 direction; unsigned int seed; int countEmitted; int done; int pad; }; struct Onb { __forceinline__ __device__ Onb(const float3& normal) { m_normal = normal; if( fabs(m_normal.x) > fabs(m_normal.z) ) { m_binormal.x = -m_normal.y; m_binormal.y = m_normal.x; m_binormal.z = 0; } else { m_binormal.x = 0; m_binormal.y = -m_normal.z; m_binormal.z = m_normal.y; } m_binormal = normalize(m_binormal); m_tangent = cross( m_binormal, m_normal ); } __forceinline__ __device__ void inverse_transform(float3& p) const { p = p.x*m_tangent + p.y*m_binormal + p.z*m_normal; } float3 m_tangent; float3 m_binormal; float3 m_normal; }; //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ static __forceinline__ __device__ void* unpackPointer( unsigned int i0, unsigned int i1 ) { const unsigned long long uptr = static_cast<unsigned long long>( i0 ) << 32 | i1; void* ptr = reinterpret_cast<void*>( uptr ); return ptr; } static __forceinline__ __device__ void packPointer( void* ptr, unsigned int& i0, unsigned int& i1 ) { const unsigned long long uptr = reinterpret_cast<unsigned long long>( ptr ); i0 = uptr >> 32; i1 = uptr & 0x00000000ffffffff; } static __forceinline__ __device__ RadiancePRD* getPRD() { const unsigned int u0 = optixGetPayload_0(); const unsigned int u1 = optixGetPayload_1(); return reinterpret_cast<RadiancePRD*>( unpackPointer( u0, u1 ) ); } static __forceinline__ __device__ void setPayloadOcclusion( bool occluded ) { optixSetPayload_0( static_cast<unsigned int>( occluded ) ); } static __forceinline__ __device__ void cosine_sample_hemisphere(const float u1, const float u2, float3& p) { // Uniformly sample disk. const float r = sqrtf( u1 ); const float phi = 2.0f*M_PIf * u2; p.x = r * cosf( phi ); p.y = r * sinf( phi ); // Project up to hemisphere. p.z = sqrtf( fmaxf( 0.0f, 1.0f - p.x*p.x - p.y*p.y ) ); } static __forceinline__ __device__ void traceRadiance( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax, RadiancePRD* prd ) { // TODO: deduce stride from num ray-types passed in params unsigned int u0, u1; packPointer( prd, u0, u1 ); optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, RAY_TYPE_RADIANCE, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_RADIANCE, // missSBTIndex u0, u1 ); } static __forceinline__ __device__ bool traceOcclusion( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax ) { unsigned int occluded = 0u; optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, RAY_TYPE_OCCLUSION, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_OCCLUSION, // missSBTIndex occluded ); return occluded; } __forceinline__ __device__ float3 deviceColor( unsigned int idx ) { return make_float3( idx == 0 ? 0.05f : 0.0f, idx == 1 ? 0.05f : 0.0f, idx == 2 ? 
0.05f : 0.0f ); } //------------------------------------------------------------------------------ // // //------------------------------------------------------------------------------ extern "C" __global__ void __raygen__rg() { const int w = params.width; const int h = params.height; const uint3 launch_idx = optixGetLaunchIndex(); const int2 pixel_idx = params.sample_index_buffer[ launch_idx.x ]; // Work distribution might assign tiles that cross over image boundary if( pixel_idx.x > w-1 || pixel_idx.y > h-1 ) return; const float3 eye = params.eye; const float3 U = params.U; const float3 V = params.V; const float3 W = params.W; const int subframe_index = params.subframe_index; unsigned int seed = tea<4>( pixel_idx.y*w + pixel_idx.x, subframe_index ); float3 result = make_float3( 0.0f ); int i = params.samples_per_launch; do { // The center of each pixel is at fraction (0.5,0.5) const float2 subpixel_jitter = make_float2( rnd( seed ), rnd( seed ) ); const float2 d = 2.0f * make_float2( ( static_cast<float>( pixel_idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ), ( static_cast<float>( pixel_idx.y ) + subpixel_jitter.y ) / static_cast<float>( h ) ) - 1.0f; float3 ray_direction = normalize(d.x*U + d.y*V + W); float3 ray_origin = eye; RadiancePRD prd; prd.emitted = make_float3(0.f); prd.radiance = make_float3(0.f); prd.attenuation = make_float3(1.f); prd.countEmitted = true; prd.done = false; prd.seed = seed; int depth = 0; for( ;; ) { traceRadiance( params.handle, ray_origin, ray_direction, 0.01f, // tmin // TODO: smarter offset 1e16f, // tmax &prd ); result += prd.emitted; result += prd.radiance * prd.attenuation; if( prd.done || depth >= 3 ) // TODO RR, variable for depth break; ray_origin = prd.origin; ray_direction = prd.direction; ++depth; } } while( --i ); float3 accum_color = result / static_cast<float>( params.samples_per_launch ); if( subframe_index > 0 ) { const float a = 1.0f / static_cast<float>( subframe_index+1 ); const float3 accum_color_prev = make_float3( params.sample_accum_buffer[ launch_idx.x ]); accum_color = lerp( accum_color_prev, accum_color, a ); } params.sample_accum_buffer [ launch_idx.x ] = make_float4( accum_color, 1.0f); const unsigned int image_index = pixel_idx.y * params.width + pixel_idx.x; params.result_buffer[ image_index ] = make_color ( accum_color + deviceColor( params.device_idx ) ); } extern "C" __global__ void __miss__radiance() { MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() ); RadiancePRD* prd = getPRD(); prd->radiance = make_float3( rt_data->r, rt_data->g, rt_data->b ); prd->done = true; } extern "C" __global__ void __closesthit__occlusion() { setPayloadOcclusion( true ); } extern "C" __global__ void __closesthit__radiance() { HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer(); const int prim_idx = optixGetPrimitiveIndex(); const float3 ray_dir = optixGetWorldRayDirection(); const int vert_idx_offset = prim_idx*3; const float3 v0 = make_float3( rt_data->vertices[ vert_idx_offset+0 ] ); const float3 v1 = make_float3( rt_data->vertices[ vert_idx_offset+1 ] ); const float3 v2 = make_float3( rt_data->vertices[ vert_idx_offset+2 ] ); const float3 N_0 = normalize( cross( v1-v0, v2-v0 ) ); const float3 N = faceforward( N_0, -ray_dir, N_0 ); const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax()*ray_dir; RadiancePRD* prd = getPRD(); if( prd->countEmitted ) prd->emitted = rt_data->emission_color; else prd->emitted = make_float3( 0.0f ); unsigned int seed = prd->seed; { const float z1 = rnd(seed); const 
float z2 = rnd(seed); float3 w_in; cosine_sample_hemisphere( z1, z2, w_in ); Onb onb( N ); onb.inverse_transform( w_in ); prd->direction = w_in; prd->origin = P; prd->attenuation *= rt_data->diffuse_color; prd->countEmitted = false; } const float z1 = rnd(seed); const float z2 = rnd(seed); prd->seed = seed; ParallelogramLight light = params.light; const float3 light_pos = light.corner + light.v1 * z1 + light.v2 * z2; // Calculate properties of light sample (for area based pdf) const float Ldist = length(light_pos - P ); const float3 L = normalize(light_pos - P ); const float nDl = dot( N, L ); const float LnDl = -dot( light.normal, L ); float weight = 0.0f; if( nDl > 0.0f && LnDl > 0.0f ) { const bool occluded = traceOcclusion( params.handle, P, L, 0.01f, // tmin Ldist - 0.01f // tmax ); if( !occluded ) { const float A = length(cross(light.v1, light.v2)); weight = nDl * LnDl * A / (M_PIf * Ldist * Ldist); } } prd->radiance += light.emission * weight; }
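The end of __raygen__rg above blends each launch's average into the stored accumulation buffer with weight a = 1/(subframe_index + 1); that lerp is the standard incremental-mean update, so the buffer always holds the mean over all subframes so far. The same update written for a single scalar (a sketch; running_mean is an illustrative name):

// prev_mean is the mean of the first N samples; returns the mean over N+1.
float running_mean(float prev_mean, float new_sample, int N)
{
    const float a = 1.0f / (float)(N + 1);
    return prev_mean + a * (new_sample - prev_mean);   // == lerp(prev_mean, new_sample, a)
}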
bdc9bddffb7203107ff8ecad6b62b28286374ba6.cu
// // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #include <optix.h> #include "optixMultiGPU.h" #include <sutil/vec_math.h> #include <cuda/helpers.h> #include <cuda/random.h> extern "C" { __constant__ Params params; } //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ struct RadiancePRD { // TODO: move some state directly into payload registers? 
float3 emitted; float3 radiance; float3 attenuation; float3 origin; float3 direction; unsigned int seed; int countEmitted; int done; int pad; }; struct Onb { __forceinline__ __device__ Onb(const float3& normal) { m_normal = normal; if( fabs(m_normal.x) > fabs(m_normal.z) ) { m_binormal.x = -m_normal.y; m_binormal.y = m_normal.x; m_binormal.z = 0; } else { m_binormal.x = 0; m_binormal.y = -m_normal.z; m_binormal.z = m_normal.y; } m_binormal = normalize(m_binormal); m_tangent = cross( m_binormal, m_normal ); } __forceinline__ __device__ void inverse_transform(float3& p) const { p = p.x*m_tangent + p.y*m_binormal + p.z*m_normal; } float3 m_tangent; float3 m_binormal; float3 m_normal; }; //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ static __forceinline__ __device__ void* unpackPointer( unsigned int i0, unsigned int i1 ) { const unsigned long long uptr = static_cast<unsigned long long>( i0 ) << 32 | i1; void* ptr = reinterpret_cast<void*>( uptr ); return ptr; } static __forceinline__ __device__ void packPointer( void* ptr, unsigned int& i0, unsigned int& i1 ) { const unsigned long long uptr = reinterpret_cast<unsigned long long>( ptr ); i0 = uptr >> 32; i1 = uptr & 0x00000000ffffffff; } static __forceinline__ __device__ RadiancePRD* getPRD() { const unsigned int u0 = optixGetPayload_0(); const unsigned int u1 = optixGetPayload_1(); return reinterpret_cast<RadiancePRD*>( unpackPointer( u0, u1 ) ); } static __forceinline__ __device__ void setPayloadOcclusion( bool occluded ) { optixSetPayload_0( static_cast<unsigned int>( occluded ) ); } static __forceinline__ __device__ void cosine_sample_hemisphere(const float u1, const float u2, float3& p) { // Uniformly sample disk. const float r = sqrtf( u1 ); const float phi = 2.0f*M_PIf * u2; p.x = r * cosf( phi ); p.y = r * sinf( phi ); // Project up to hemisphere. p.z = sqrtf( fmaxf( 0.0f, 1.0f - p.x*p.x - p.y*p.y ) ); } static __forceinline__ __device__ void traceRadiance( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax, RadiancePRD* prd ) { // TODO: deduce stride from num ray-types passed in params unsigned int u0, u1; packPointer( prd, u0, u1 ); optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, RAY_TYPE_RADIANCE, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_RADIANCE, // missSBTIndex u0, u1 ); } static __forceinline__ __device__ bool traceOcclusion( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax ) { unsigned int occluded = 0u; optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, RAY_TYPE_OCCLUSION, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_OCCLUSION, // missSBTIndex occluded ); return occluded; } __forceinline__ __device__ float3 deviceColor( unsigned int idx ) { return make_float3( idx == 0 ? 0.05f : 0.0f, idx == 1 ? 0.05f : 0.0f, idx == 2 ? 
0.05f : 0.0f ); } //------------------------------------------------------------------------------ // // //------------------------------------------------------------------------------ extern "C" __global__ void __raygen__rg() { const int w = params.width; const int h = params.height; const uint3 launch_idx = optixGetLaunchIndex(); const int2 pixel_idx = params.sample_index_buffer[ launch_idx.x ]; // Work distribution might assign tiles that cross over image boundary if( pixel_idx.x > w-1 || pixel_idx.y > h-1 ) return; const float3 eye = params.eye; const float3 U = params.U; const float3 V = params.V; const float3 W = params.W; const int subframe_index = params.subframe_index; unsigned int seed = tea<4>( pixel_idx.y*w + pixel_idx.x, subframe_index ); float3 result = make_float3( 0.0f ); int i = params.samples_per_launch; do { // The center of each pixel is at fraction (0.5,0.5) const float2 subpixel_jitter = make_float2( rnd( seed ), rnd( seed ) ); const float2 d = 2.0f * make_float2( ( static_cast<float>( pixel_idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ), ( static_cast<float>( pixel_idx.y ) + subpixel_jitter.y ) / static_cast<float>( h ) ) - 1.0f; float3 ray_direction = normalize(d.x*U + d.y*V + W); float3 ray_origin = eye; RadiancePRD prd; prd.emitted = make_float3(0.f); prd.radiance = make_float3(0.f); prd.attenuation = make_float3(1.f); prd.countEmitted = true; prd.done = false; prd.seed = seed; int depth = 0; for( ;; ) { traceRadiance( params.handle, ray_origin, ray_direction, 0.01f, // tmin // TODO: smarter offset 1e16f, // tmax &prd ); result += prd.emitted; result += prd.radiance * prd.attenuation; if( prd.done || depth >= 3 ) // TODO RR, variable for depth break; ray_origin = prd.origin; ray_direction = prd.direction; ++depth; } } while( --i ); float3 accum_color = result / static_cast<float>( params.samples_per_launch ); if( subframe_index > 0 ) { const float a = 1.0f / static_cast<float>( subframe_index+1 ); const float3 accum_color_prev = make_float3( params.sample_accum_buffer[ launch_idx.x ]); accum_color = lerp( accum_color_prev, accum_color, a ); } params.sample_accum_buffer [ launch_idx.x ] = make_float4( accum_color, 1.0f); const unsigned int image_index = pixel_idx.y * params.width + pixel_idx.x; params.result_buffer[ image_index ] = make_color ( accum_color + deviceColor( params.device_idx ) ); } extern "C" __global__ void __miss__radiance() { MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() ); RadiancePRD* prd = getPRD(); prd->radiance = make_float3( rt_data->r, rt_data->g, rt_data->b ); prd->done = true; } extern "C" __global__ void __closesthit__occlusion() { setPayloadOcclusion( true ); } extern "C" __global__ void __closesthit__radiance() { HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer(); const int prim_idx = optixGetPrimitiveIndex(); const float3 ray_dir = optixGetWorldRayDirection(); const int vert_idx_offset = prim_idx*3; const float3 v0 = make_float3( rt_data->vertices[ vert_idx_offset+0 ] ); const float3 v1 = make_float3( rt_data->vertices[ vert_idx_offset+1 ] ); const float3 v2 = make_float3( rt_data->vertices[ vert_idx_offset+2 ] ); const float3 N_0 = normalize( cross( v1-v0, v2-v0 ) ); const float3 N = faceforward( N_0, -ray_dir, N_0 ); const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax()*ray_dir; RadiancePRD* prd = getPRD(); if( prd->countEmitted ) prd->emitted = rt_data->emission_color; else prd->emitted = make_float3( 0.0f ); unsigned int seed = prd->seed; { const float z1 = rnd(seed); const 
float z2 = rnd(seed); float3 w_in; cosine_sample_hemisphere( z1, z2, w_in ); Onb onb( N ); onb.inverse_transform( w_in ); prd->direction = w_in; prd->origin = P; prd->attenuation *= rt_data->diffuse_color; prd->countEmitted = false; } const float z1 = rnd(seed); const float z2 = rnd(seed); prd->seed = seed; ParallelogramLight light = params.light; const float3 light_pos = light.corner + light.v1 * z1 + light.v2 * z2; // Calculate properties of light sample (for area based pdf) const float Ldist = length(light_pos - P ); const float3 L = normalize(light_pos - P ); const float nDl = dot( N, L ); const float LnDl = -dot( light.normal, L ); float weight = 0.0f; if( nDl > 0.0f && LnDl > 0.0f ) { const bool occluded = traceOcclusion( params.handle, P, L, 0.01f, // tmin Ldist - 0.01f // tmax ); if( !occluded ) { const float A = length(cross(light.v1, light.v2)); weight = nDl * LnDl * A / (M_PIf * Ldist * Ldist); } } prd->radiance += light.emission * weight; }
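packPointer/unpackPointer above move a 64-bit RadiancePRD pointer through the two 32-bit payload registers passed to optixTrace. The round trip itself has no OptiX dependency and can be sanity-checked on the host (a sketch):

#include <cassert>
#include <cstdint>

static void packPointer(void* ptr, uint32_t& i0, uint32_t& i1)
{
    const uint64_t uptr = reinterpret_cast<uint64_t>(ptr);
    i0 = static_cast<uint32_t>(uptr >> 32);          // high half
    i1 = static_cast<uint32_t>(uptr & 0xffffffffu);  // low half
}

static void* unpackPointer(uint32_t i0, uint32_t i1)
{
    const uint64_t uptr = (static_cast<uint64_t>(i0) << 32) | i1;
    return reinterpret_cast<void*>(uptr);
}

int main()
{
    int prd = 42;
    uint32_t u0 = 0, u1 = 0;
    packPointer(&prd, u0, u1);
    assert(unpackPointer(u0, u1) == &prd);   // lossless for 64-bit pointers
    return 0;
}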
ce672462aed10f81afe88d43162d63f713494016.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * AnglePotential.cu * * Created on: Aug 4, 2010 * Author: zhmurov */ #include "../Core/global.h" #include "../Core/md.cuh" #include "../Util/Log.h" #include "AnglePotential.cuh" namespace angle_potential { class Log: public ILog { virtual void Write(const char* message) const { std::cout << makeTimePrefix() << "<angle_potential> " << message << std::endl; } } log; #define LOG LogStream(log) void create(){ potential.compute = &compute; potential.destroy = &destroy; sprintf(potential.name, "Angle potential"); potentials[potentialsCount] = &potential; potentialsCount ++; energyOutput.computeValues = &computeEnergy; allocateCPU((void**)&energyOutput.values, parameters.Ntr*sizeof(float)); strcpy(energyOutput.name, ENERGY_OUTPUT_NAME_ANGLE); energyOutputs[energyOutputsCount] = &energyOutput; energyOutputsCount ++; init(); } void init(){ LOG << "Initializing angle potential..."; angleData.A = topology.angleCount; angleData.Atot = topology.angleCount*parameters.Ntr; angleBlockSize = BLOCK_SIZE; angleBlockCount = angleData.Atot/BLOCK_SIZE + 1; angleSummBlockSize = BLOCK_SIZE; angleSummBlockCount = gsystem.Ntot/BLOCK_SIZE + 1; if(angleData.Atot > 0){ allocateCPU((void**)&angleData.h_angleCount, gsystem.Ntot*sizeof(int)); allocateGPU((void**)&angleData.d_angleCount, gsystem.Ntot*sizeof(int)); int i, a; //Testing /*int atomsInBlock = 0; int anglesInBlock = 0; int anglesInBlockBefore = 0; int currentBlock = 0; for(i = 0; i < gsystem.Nsim; i++){ atomsInBlock ++; for(a = 0; a < topology.angleCount; a++){ Angle angle = topology.angles[a]; if(angle.i == i || angle.j == i || angle.k == i){ anglesInBlock ++; } } if(anglesInBlock >= angleBlockSize){ if(anglesInBlock == angleBlockSize){ printf("Block %d: %d atoms, %d angles.\n", currentBlock, atomsInBlock, anglesInBlock); } else { printf("Block %d: %d atoms, %d angles.\n", currentBlock, atomsInBlock, anglesInBlockBefore); i--; } anglesInBlock = 0; atomsInBlock = 0; currentBlock ++; } else { anglesInBlockBefore = anglesInBlock; } }*/ //exit(0); //Done Testing for(i = 0; i < gsystem.Ntot; i++){ angleData.h_angleCount[i] = 0; } for(a = 0; a < angleData.A; a++){ Angle angle = topology.angles[a]; angleData.h_angleCount[angle.i]++; angleData.h_angleCount[angle.j]++; angleData.h_angleCount[angle.k]++; } angleData.maxAnglesPerAtom = 0; for(i = 0; i < gsystem.N; i++){ if(angleData.h_angleCount[i] > angleData.maxAnglesPerAtom){ angleData.maxAnglesPerAtom = angleData.h_angleCount[i]; } } LOG << "Maximum angles per atom is " << angleData.maxAnglesPerAtom; allocateCPU((void**)&angleData.h_angles, angleData.Atot*sizeof(int4)); allocateGPU((void**)&angleData.d_angles, angleData.Atot*sizeof(int4)); allocateCPU((void**)&angleData.h_angleRefs, angleData.Atot*sizeof(int4)); allocateGPU((void**)&angleData.d_angleRefs, angleData.Atot*sizeof(int4)); allocateCPU((void**)&angleData.h_angleForces, gsystem.widthTot*angleData.maxAnglesPerAtom*sizeof(float4)); allocateGPU((void**)&angleData.d_angleForces, gsystem.widthTot*angleData.maxAnglesPerAtom*sizeof(float4)); allocateCPU((void**)&angleData.h_angleTypes, angleTypesCount*sizeof(float2)); allocateGPU((void**)&angleData.d_angleTypes, angleTypesCount*sizeof(float2)); allocateCPU((void**)&angleData.h_angleEnergies, angleData.Atot*sizeof(float)); allocateGPU((void**)&angleData.d_angleEnergies, angleData.Atot*sizeof(float)); for(i = 0; i < gsystem.Ntot; i++){ angleData.h_angleCount[i] = 0; } for(a = 0; a < angleData.A; a++){ Angle angle = 
topology.angles[a]; angleData.h_angles[a].x = angle.i; angleData.h_angles[a].y = angle.j; angleData.h_angles[a].z = angle.k; angleData.h_angles[a].w = angle.type; angleData.h_angleRefs[a].x = angleData.h_angleCount[angle.i]; angleData.h_angleRefs[a].y = angleData.h_angleCount[angle.j]; angleData.h_angleRefs[a].z = angleData.h_angleCount[angle.k]; angleData.h_angleCount[angle.i]++; angleData.h_angleCount[angle.j]++; angleData.h_angleCount[angle.k]++; } for(i = 0; i < gsystem.N; i++){ if(angleData.h_angleCount[i] > angleData.maxAnglesPerAtom){ DIE("Maximum angles per atom exceeded the limit of %d on atom %d", angleData.maxAnglesPerAtom, i); } } int traj, atot, itot; for(traj = 1; traj < parameters.Ntr; traj++){ for(a = 0; a < angleData.A; a++){ atot = angleData.A*traj + a; angleData.h_angles[atot].x = angleData.h_angles[a].x + gsystem.N*traj; angleData.h_angles[atot].y = angleData.h_angles[a].y + gsystem.N*traj; angleData.h_angles[atot].z = angleData.h_angles[a].z + gsystem.N*traj; angleData.h_angles[atot].w = angleData.h_angles[a].w; angleData.h_angleRefs[atot].x = angleData.h_angleRefs[a].x; angleData.h_angleRefs[atot].y = angleData.h_angleRefs[a].y; angleData.h_angleRefs[atot].z = angleData.h_angleRefs[a].z; } for(i = 0; i < gsystem.N; i++){ itot = gsystem.N*traj + i; angleData.h_angleCount[itot] = angleData.h_angleCount[i]; } } /*for(a = 0; a < topology.angleCount; a++){ printf("%d: (%d-%d-%d, %d) \n", a, angleData.h_angles[a].x, angleData.h_angles[a].y, angleData.h_angles[a].z, angleData.h_angles[a].w); }*/ for(i = 0; i < angleTypesCount; i++){ angleData.h_angleTypes[i].x = angleTypes[i].ktheta; angleData.h_angleTypes[i].y = angleTypes[i].theta0; } hipMemcpy(angleData.d_angleCount, angleData.h_angleCount, gsystem.Ntot*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(angleData.d_angles, angleData.h_angles, angleData.Atot*sizeof(int4), hipMemcpyHostToDevice); hipMemcpy(angleData.d_angleRefs, angleData.h_angleRefs, angleData.Atot*sizeof(int4), hipMemcpyHostToDevice); hipMemcpy(angleData.d_angleTypes, angleData.h_angleTypes, angleTypesCount*sizeof(int2), hipMemcpyHostToDevice); hipBindTexture(0, t_angleTypes, angleData.d_angleTypes, angleTypesCount*sizeof(int2)); } hipMemcpyToSymbol(c_angleData, &angleData, sizeof(GAngleData), 0, hipMemcpyHostToDevice); LOG << "Done initializing angle potential."; } __global__ void harmonicAnglePotential_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_angleData.Atot){ int4 angle = c_angleData.d_angles[d_i]; int4 ref = c_angleData.d_angleRefs[d_i]; float4 r1 = tex1Dfetch(t_coord, angle.x); float4 r2 = tex1Dfetch(t_coord, angle.y); float4 r3 = tex1Dfetch(t_coord, angle.z); float2 par = tex1Dfetch(t_angleTypes, angle.w); float3 dr12, dr32; dr12.x = r1.x - r2.x; dr12.y = r1.y - r2.y; dr12.z = r1.z - r2.z; DO_PBC(dr12); dr32.x = r3.x - r2.x; dr32.y = r3.y - r2.y; dr32.z = r3.z - r2.z; DO_PBC(dr32); float r12inv = 1.0f/sqrtf(dr12.x*dr12.x + dr12.y*dr12.y + dr12.z*dr12.z); float r32inv = 1.0f/sqrtf(dr32.x*dr32.x + dr32.y*dr32.y + dr32.z*dr32.z); float costheta = (dr12.x*dr32.x + dr12.y*dr32.y + dr12.z*dr32.z)*r12inv*r32inv; if(costheta > 1.0f){ costheta = 1.0f; } else if(costheta < -1.0f){ costheta = -1.0f; } float sintheta = sqrtf(1.0f - costheta*costheta); float theta = acos(costheta); float diff = theta - par.y; if(sintheta < 1.e-6){ if(diff < 0){ diff *= 2.0f*par.x; } else { diff *= -2.0f*par.x; } } else { diff *= (-2.0f*par.x) / sintheta; } float c1 = diff*r12inv; float c2 = diff*r32inv; float4 f1, f2, f3; f1.x = c1*(dr12.x*(r12inv*costheta) 
- dr32.x*r32inv); f1.y = c1*(dr12.y*(r12inv*costheta) - dr32.y*r32inv); f1.z = c1*(dr12.z*(r12inv*costheta) - dr32.z*r32inv); f2 = f1; f3.x = c2*(dr32.x*(r32inv*costheta) - dr12.x*r12inv); f3.y = c2*(dr32.y*(r32inv*costheta) - dr12.y*r12inv); f3.z = c2*(dr32.z*(r32inv*costheta) - dr12.z*r12inv); f2.x += f3.x; f2.y += f3.y; f2.z += f3.z; f2.x = -f2.x; f2.y = -f2.y; f2.z = -f2.z; c_angleData.d_angleForces[c_gsystem.widthTot*ref.x + angle.x] = f1; c_angleData.d_angleForces[c_gsystem.widthTot*ref.y + angle.y] = f2; c_angleData.d_angleForces[c_gsystem.widthTot*ref.z + angle.z] = f3; } } __global__ void summAngleForces_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_gsystem.Ntot){ float4 f = c_gsystem.d_forces[d_i]; float4 df; int i; for(i = 0; i < c_angleData.d_angleCount[d_i]; i++){ df = c_angleData.d_angleForces[c_gsystem.widthTot*i + d_i]; f.x += df.x; f.y += df.y; f.z += df.z; } c_gsystem.d_forces[d_i] = f; } } inline void compute(){ //checkCUDAError("before angle potential"); hipLaunchKernelGGL(( harmonicAnglePotential_kernel), dim3(angleBlockCount), dim3(angleBlockSize), 0, 0, ); //checkCUDAError("before sum angles"); hipLaunchKernelGGL(( summAngleForces_kernel), dim3(angleSummBlockCount), dim3(angleSummBlockSize), 0, 0, ); //checkCUDAError("after sum angles"); /*hipMemcpy(gsystem.h_forces, gsystem.d_forces, atomCount*sizeof(float4), hipMemcpyDeviceToHost); int i; float3 force = make_float3(0.0f, 0.0f, 0.0f); for(i = 0; i < atomCount; i++){ force.x += gsystem.h_forces[i].x; force.y += gsystem.h_forces[i].y; force.z += gsystem.h_forces[i].z; printf("%d: (%f, %f, %f) %f\n", i, gsystem.h_forces[i].x, gsystem.h_forces[i].y, gsystem.h_forces[i].z, sqrtf(gsystem.h_forces[i].x*gsystem.h_forces[i].x + gsystem.h_forces[i].y*gsystem.h_forces[i].y + gsystem.h_forces[i].z*gsystem.h_forces[i].z)); } printf("Net force (angles): (%f, %f, %f) %f\n", force.x, force.y, force.z, sqrtf(force.x*force.x + force.y*force.y + force.z*force.z));*/ } __global__ void harmonicAnglePotentialEnergy_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_angleData.Atot){ int4 angle = c_angleData.d_angles[d_i]; float4 r1 = tex1Dfetch(t_coord, angle.x); float4 r2 = tex1Dfetch(t_coord, angle.y); float4 r3 = tex1Dfetch(t_coord, angle.z); float2 par = tex1Dfetch(t_angleTypes, angle.w); float3 dr12, dr32; dr12.x = r1.x - r2.x; dr12.y = r1.y - r2.y; dr12.z = r1.z - r2.z; DO_PBC(dr12); dr32.x = r3.x - r2.x; dr32.y = r3.y - r2.y; dr32.z = r3.z - r2.z; DO_PBC(dr32); float r12inv = 1.0f/sqrtf(dr12.x*dr12.x + dr12.y*dr12.y + dr12.z*dr12.z); float r32inv = 1.0f/sqrtf(dr32.x*dr32.x + dr32.y*dr32.y + dr32.z*dr32.z); float costheta = (dr12.x*dr32.x + dr12.y*dr32.y + dr12.z*dr32.z)*r12inv*r32inv; if(costheta > 1.0f){ costheta = 1.0f; } else if(costheta < -1.0f){ costheta = -1.0f; } float theta = acos(costheta); float diff = theta - par.y; c_angleData.d_angleEnergies[d_i] = par.x*diff*diff; } } inline void computeEnergy(){ hipLaunchKernelGGL(( harmonicAnglePotentialEnergy_kernel), dim3(angleBlockCount), dim3(angleBlockSize), 0, 0, ); hipMemcpy(angleData.h_angleEnergies, angleData.d_angleEnergies, angleData.Atot*sizeof(float), hipMemcpyDeviceToHost); int i, traj; for(traj = 0; traj < parameters.Ntr; traj++){ float pot = 0.0f; for(i = 0; i < angleData.A; i++){ pot += angleData.h_angleEnergies[i + traj*angleData.A]; } energyOutput.values[traj] = pot; } checkCUDAError("angle energy"); } void destroy(){ } #undef LOG } // namespace angle_potential
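Read as a pair with the CUDA original that follows, the kernel math above is easiest to sanity-check against a scalar reference. The following is a minimal host-side sketch, assuming nothing beyond standard C++ (the names Vec3 and angleEnergyForces are invented for illustration and do not exist in these sources): it evaluates the same CHARMM-style harmonic angle term V = ktheta*(theta - theta0)^2 and its analytic forces for one i-j-k triple, mirroring harmonicAnglePotential_kernel but leaving out the DO_PBC wrapping and the small-sin(theta) guard.

// Host-side reference sketch (illustrative only, not part of either file above).
#include <cmath>
#include <cstdio>

struct Vec3 { double x, y, z; };

static Vec3 sub(Vec3 a, Vec3 b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
static double dot(Vec3 a, Vec3 b) { return a.x*b.x + a.y*b.y + a.z*b.z; }

// Returns V = k*(theta - theta0)^2 and writes forces on atoms i, j (apex), k.
static double angleEnergyForces(Vec3 ri, Vec3 rj, Vec3 rk,
                                double ktheta, double theta0,
                                Vec3* fi, Vec3* fj, Vec3* fk) {
    Vec3 d12 = sub(ri, rj);                       // apex -> i
    Vec3 d32 = sub(rk, rj);                       // apex -> k
    double r12inv = 1.0 / std::sqrt(dot(d12, d12));
    double r32inv = 1.0 / std::sqrt(dot(d32, d32));
    double c = dot(d12, d32) * r12inv * r32inv;   // cos(theta), clamped
    c = std::fmax(-1.0, std::fmin(1.0, c));
    double s = std::sqrt(1.0 - c*c);              // sin(theta) (guard omitted)
    double diff = std::acos(c) - theta0;
    double pref = -2.0 * ktheta * diff / s;       // -dV/dtheta / sin(theta)
    double c1 = pref * r12inv, c2 = pref * r32inv;
    *fi = { c1*(d12.x*r12inv*c - d32.x*r32inv),
            c1*(d12.y*r12inv*c - d32.y*r32inv),
            c1*(d12.z*r12inv*c - d32.z*r32inv) };
    *fk = { c2*(d32.x*r32inv*c - d12.x*r12inv),
            c2*(d32.y*r32inv*c - d12.y*r12inv),
            c2*(d32.z*r32inv*c - d12.z*r12inv) };
    // Newton's third law: the apex atom takes the opposite of the other two.
    *fj = { -(fi->x + fk->x), -(fi->y + fk->y), -(fi->z + fk->z) };
    return ktheta * diff * diff;                  // CHARMM convention, no 1/2
}

int main() {
    const double kPi = 3.14159265358979323846;
    Vec3 fi, fj, fk;
    // 90-degree test angle with a water-like theta0 of 104.52 degrees.
    double e = angleEnergyForces({1,0,0}, {0,0,0}, {0,1,0},
                                 450.0, 104.52 * kPi / 180.0, &fi, &fj, &fk);
    std::printf("E = %g, net force = (%g, %g, %g)\n",
                e, fi.x + fj.x + fk.x, fi.y + fj.y + fk.y, fi.z + fj.z + fk.z);
    return 0;
}

The printed net force should be numerically zero, which is the quickest check that the three per-atom force expressions used by the kernel are mutually consistent.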
ce672462aed10f81afe88d43162d63f713494016.cu
/* * AnglePotential.cu * * Created on: Aug 4, 2010 * Author: zhmurov */ #include "../Core/global.h" #include "../Core/md.cuh" #include "../Util/Log.h" #include "AnglePotential.cuh" namespace angle_potential { class Log: public ILog { virtual void Write(const char* message) const { std::cout << makeTimePrefix() << "<angle_potential> " << message << std::endl; } } log; #define LOG LogStream(log) void create(){ potential.compute = &compute; potential.destroy = &destroy; sprintf(potential.name, "Angle potential"); potentials[potentialsCount] = &potential; potentialsCount ++; energyOutput.computeValues = &computeEnergy; allocateCPU((void**)&energyOutput.values, parameters.Ntr*sizeof(float)); strcpy(energyOutput.name, ENERGY_OUTPUT_NAME_ANGLE); energyOutputs[energyOutputsCount] = &energyOutput; energyOutputsCount ++; init(); } void init(){ LOG << "Initializing angle potential..."; angleData.A = topology.angleCount; angleData.Atot = topology.angleCount*parameters.Ntr; angleBlockSize = BLOCK_SIZE; angleBlockCount = angleData.Atot/BLOCK_SIZE + 1; angleSummBlockSize = BLOCK_SIZE; angleSummBlockCount = gsystem.Ntot/BLOCK_SIZE + 1; if(angleData.Atot > 0){ allocateCPU((void**)&angleData.h_angleCount, gsystem.Ntot*sizeof(int)); allocateGPU((void**)&angleData.d_angleCount, gsystem.Ntot*sizeof(int)); int i, a; //Testing /*int atomsInBlock = 0; int anglesInBlock = 0; int anglesInBlockBefore = 0; int currentBlock = 0; for(i = 0; i < gsystem.Nsim; i++){ atomsInBlock ++; for(a = 0; a < topology.angleCount; a++){ Angle angle = topology.angles[a]; if(angle.i == i || angle.j == i || angle.k == i){ anglesInBlock ++; } } if(anglesInBlock >= angleBlockSize){ if(anglesInBlock == angleBlockSize){ printf("Block %d: %d atoms, %d angles.\n", currentBlock, atomsInBlock, anglesInBlock); } else { printf("Block %d: %d atoms, %d angles.\n", currentBlock, atomsInBlock, anglesInBlockBefore); i--; } anglesInBlock = 0; atomsInBlock = 0; currentBlock ++; } else { anglesInBlockBefore = anglesInBlock; } }*/ //exit(0); //Done Testing for(i = 0; i < gsystem.Ntot; i++){ angleData.h_angleCount[i] = 0; } for(a = 0; a < angleData.A; a++){ Angle angle = topology.angles[a]; angleData.h_angleCount[angle.i]++; angleData.h_angleCount[angle.j]++; angleData.h_angleCount[angle.k]++; } angleData.maxAnglesPerAtom = 0; for(i = 0; i < gsystem.N; i++){ if(angleData.h_angleCount[i] > angleData.maxAnglesPerAtom){ angleData.maxAnglesPerAtom = angleData.h_angleCount[i]; } } LOG << "Maximum angles per atom is " << angleData.maxAnglesPerAtom; allocateCPU((void**)&angleData.h_angles, angleData.Atot*sizeof(int4)); allocateGPU((void**)&angleData.d_angles, angleData.Atot*sizeof(int4)); allocateCPU((void**)&angleData.h_angleRefs, angleData.Atot*sizeof(int4)); allocateGPU((void**)&angleData.d_angleRefs, angleData.Atot*sizeof(int4)); allocateCPU((void**)&angleData.h_angleForces, gsystem.widthTot*angleData.maxAnglesPerAtom*sizeof(float4)); allocateGPU((void**)&angleData.d_angleForces, gsystem.widthTot*angleData.maxAnglesPerAtom*sizeof(float4)); allocateCPU((void**)&angleData.h_angleTypes, angleTypesCount*sizeof(float2)); allocateGPU((void**)&angleData.d_angleTypes, angleTypesCount*sizeof(float2)); allocateCPU((void**)&angleData.h_angleEnergies, angleData.Atot*sizeof(float)); allocateGPU((void**)&angleData.d_angleEnergies, angleData.Atot*sizeof(float)); for(i = 0; i < gsystem.Ntot; i++){ angleData.h_angleCount[i] = 0; } for(a = 0; a < angleData.A; a++){ Angle angle = topology.angles[a]; angleData.h_angles[a].x = angle.i; angleData.h_angles[a].y = angle.j; 
angleData.h_angles[a].z = angle.k; angleData.h_angles[a].w = angle.type; angleData.h_angleRefs[a].x = angleData.h_angleCount[angle.i]; angleData.h_angleRefs[a].y = angleData.h_angleCount[angle.j]; angleData.h_angleRefs[a].z = angleData.h_angleCount[angle.k]; angleData.h_angleCount[angle.i]++; angleData.h_angleCount[angle.j]++; angleData.h_angleCount[angle.k]++; } for(i = 0; i < gsystem.N; i++){ if(angleData.h_angleCount[i] > angleData.maxAnglesPerAtom){ DIE("Maximum angles per atom exceeded the limit of %d on atom %d", angleData.maxAnglesPerAtom, i); } } int traj, atot, itot; for(traj = 1; traj < parameters.Ntr; traj++){ for(a = 0; a < angleData.A; a++){ atot = angleData.A*traj + a; angleData.h_angles[atot].x = angleData.h_angles[a].x + gsystem.N*traj; angleData.h_angles[atot].y = angleData.h_angles[a].y + gsystem.N*traj; angleData.h_angles[atot].z = angleData.h_angles[a].z + gsystem.N*traj; angleData.h_angles[atot].w = angleData.h_angles[a].w; angleData.h_angleRefs[atot].x = angleData.h_angleRefs[a].x; angleData.h_angleRefs[atot].y = angleData.h_angleRefs[a].y; angleData.h_angleRefs[atot].z = angleData.h_angleRefs[a].z; } for(i = 0; i < gsystem.N; i++){ itot = gsystem.N*traj + i; angleData.h_angleCount[itot] = angleData.h_angleCount[i]; } } /*for(a = 0; a < topology.angleCount; a++){ printf("%d: (%d-%d-%d, %d) \n", a, angleData.h_angles[a].x, angleData.h_angles[a].y, angleData.h_angles[a].z, angleData.h_angles[a].w); }*/ for(i = 0; i < angleTypesCount; i++){ angleData.h_angleTypes[i].x = angleTypes[i].ktheta; angleData.h_angleTypes[i].y = angleTypes[i].theta0; } cudaMemcpy(angleData.d_angleCount, angleData.h_angleCount, gsystem.Ntot*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(angleData.d_angles, angleData.h_angles, angleData.Atot*sizeof(int4), cudaMemcpyHostToDevice); cudaMemcpy(angleData.d_angleRefs, angleData.h_angleRefs, angleData.Atot*sizeof(int4), cudaMemcpyHostToDevice); cudaMemcpy(angleData.d_angleTypes, angleData.h_angleTypes, angleTypesCount*sizeof(int2), cudaMemcpyHostToDevice); cudaBindTexture(0, t_angleTypes, angleData.d_angleTypes, angleTypesCount*sizeof(int2)); } cudaMemcpyToSymbol(c_angleData, &angleData, sizeof(GAngleData), 0, cudaMemcpyHostToDevice); LOG << "Done initializing angle potential."; } __global__ void harmonicAnglePotential_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_angleData.Atot){ int4 angle = c_angleData.d_angles[d_i]; int4 ref = c_angleData.d_angleRefs[d_i]; float4 r1 = tex1Dfetch(t_coord, angle.x); float4 r2 = tex1Dfetch(t_coord, angle.y); float4 r3 = tex1Dfetch(t_coord, angle.z); float2 par = tex1Dfetch(t_angleTypes, angle.w); float3 dr12, dr32; dr12.x = r1.x - r2.x; dr12.y = r1.y - r2.y; dr12.z = r1.z - r2.z; DO_PBC(dr12); dr32.x = r3.x - r2.x; dr32.y = r3.y - r2.y; dr32.z = r3.z - r2.z; DO_PBC(dr32); float r12inv = 1.0f/sqrtf(dr12.x*dr12.x + dr12.y*dr12.y + dr12.z*dr12.z); float r32inv = 1.0f/sqrtf(dr32.x*dr32.x + dr32.y*dr32.y + dr32.z*dr32.z); float costheta = (dr12.x*dr32.x + dr12.y*dr32.y + dr12.z*dr32.z)*r12inv*r32inv; if(costheta > 1.0f){ costheta = 1.0f; } else if(costheta < -1.0f){ costheta = -1.0f; } float sintheta = sqrtf(1.0f - costheta*costheta); float theta = acos(costheta); float diff = theta - par.y; if(sintheta < 1.e-6){ if(diff < 0){ diff *= 2.0f*par.x; } else { diff *= -2.0f*par.x; } } else { diff *= (-2.0f*par.x) / sintheta; } float c1 = diff*r12inv; float c2 = diff*r32inv; float4 f1, f2, f3; f1.x = c1*(dr12.x*(r12inv*costheta) - dr32.x*r32inv); f1.y = c1*(dr12.y*(r12inv*costheta) - dr32.y*r32inv); f1.z = 
c1*(dr12.z*(r12inv*costheta) - dr32.z*r32inv); f2 = f1; f3.x = c2*(dr32.x*(r32inv*costheta) - dr12.x*r12inv); f3.y = c2*(dr32.y*(r32inv*costheta) - dr12.y*r12inv); f3.z = c2*(dr32.z*(r32inv*costheta) - dr12.z*r12inv); f2.x += f3.x; f2.y += f3.y; f2.z += f3.z; f2.x = -f2.x; f2.y = -f2.y; f2.z = -f2.z; c_angleData.d_angleForces[c_gsystem.widthTot*ref.x + angle.x] = f1; c_angleData.d_angleForces[c_gsystem.widthTot*ref.y + angle.y] = f2; c_angleData.d_angleForces[c_gsystem.widthTot*ref.z + angle.z] = f3; } } __global__ void summAngleForces_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_gsystem.Ntot){ float4 f = c_gsystem.d_forces[d_i]; float4 df; int i; for(i = 0; i < c_angleData.d_angleCount[d_i]; i++){ df = c_angleData.d_angleForces[c_gsystem.widthTot*i + d_i]; f.x += df.x; f.y += df.y; f.z += df.z; } c_gsystem.d_forces[d_i] = f; } } inline void compute(){ //checkCUDAError("before angle potential"); harmonicAnglePotential_kernel<<<angleBlockCount, angleBlockSize>>>(); //checkCUDAError("before sum angles"); summAngleForces_kernel<<<angleSummBlockCount, angleSummBlockSize>>>(); //checkCUDAError("after sum angles"); /*cudaMemcpy(gsystem.h_forces, gsystem.d_forces, atomCount*sizeof(float4), cudaMemcpyDeviceToHost); int i; float3 force = make_float3(0.0f, 0.0f, 0.0f); for(i = 0; i < atomCount; i++){ force.x += gsystem.h_forces[i].x; force.y += gsystem.h_forces[i].y; force.z += gsystem.h_forces[i].z; printf("%d: (%f, %f, %f) %f\n", i, gsystem.h_forces[i].x, gsystem.h_forces[i].y, gsystem.h_forces[i].z, sqrtf(gsystem.h_forces[i].x*gsystem.h_forces[i].x + gsystem.h_forces[i].y*gsystem.h_forces[i].y + gsystem.h_forces[i].z*gsystem.h_forces[i].z)); } printf("Net force (angles): (%f, %f, %f) %f\n", force.x, force.y, force.z, sqrtf(force.x*force.x + force.y*force.y + force.z*force.z));*/ } __global__ void harmonicAnglePotentialEnergy_kernel(){ int d_i = blockIdx.x*blockDim.x + threadIdx.x; if(d_i < c_angleData.Atot){ int4 angle = c_angleData.d_angles[d_i]; float4 r1 = tex1Dfetch(t_coord, angle.x); float4 r2 = tex1Dfetch(t_coord, angle.y); float4 r3 = tex1Dfetch(t_coord, angle.z); float2 par = tex1Dfetch(t_angleTypes, angle.w); float3 dr12, dr32; dr12.x = r1.x - r2.x; dr12.y = r1.y - r2.y; dr12.z = r1.z - r2.z; DO_PBC(dr12); dr32.x = r3.x - r2.x; dr32.y = r3.y - r2.y; dr32.z = r3.z - r2.z; DO_PBC(dr32); float r12inv = 1.0f/sqrtf(dr12.x*dr12.x + dr12.y*dr12.y + dr12.z*dr12.z); float r32inv = 1.0f/sqrtf(dr32.x*dr32.x + dr32.y*dr32.y + dr32.z*dr32.z); float costheta = (dr12.x*dr32.x + dr12.y*dr32.y + dr12.z*dr32.z)*r12inv*r32inv; if(costheta > 1.0f){ costheta = 1.0f; } else if(costheta < -1.0f){ costheta = -1.0f; } float theta = acos(costheta); float diff = theta - par.y; c_angleData.d_angleEnergies[d_i] = par.x*diff*diff; } } inline void computeEnergy(){ harmonicAnglePotentialEnergy_kernel<<<angleBlockCount, angleBlockSize>>>(); cudaMemcpy(angleData.h_angleEnergies, angleData.d_angleEnergies, angleData.Atot*sizeof(float), cudaMemcpyDeviceToHost); int i, traj; for(traj = 0; traj < parameters.Ntr; traj++){ float pot = 0.0f; for(i = 0; i < angleData.A; i++){ pot += angleData.h_angleEnergies[i + traj*angleData.A]; } energyOutput.values[traj] = pot; } checkCUDAError("angle energy"); } void destroy(){ } #undef LOG } // namespace angle_potential
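One structural point both versions of this file share is how they avoid atomics when accumulating bonded forces: harmonicAnglePotential_kernel scatters each angle's three partial forces into per-atom slots of d_angleForces (the slot index coming from the precomputed h_angleRefs ranks), and summAngleForces_kernel later gathers d_angleCount[i] slots for each atom. Below is a stripped-down CUDA sketch of that two-phase pattern under toy assumptions; Term, scatterKernel and gatherKernel are invented names, and error checking is omitted.

// Illustrative scatter/gather accumulation sketch (not from the files above).
#include <cstdio>
#include <cuda_runtime.h>

// terms[t] = {atom, slot, value}; slot is this term's rank among the atom's terms.
struct Term { int atom; int slot; float value; };

__global__ void scatterKernel(const Term* terms, int nTerms,
                              float* slots, int width) {
    int t = blockIdx.x * blockDim.x + threadIdx.x;
    if (t < nTerms) {
        // Unique (slot, atom) destination per term -> no write conflicts, no atomics.
        slots[terms[t].slot * width + terms[t].atom] = terms[t].value;
    }
}

__global__ void gatherKernel(const float* slots, const int* countPerAtom,
                             int nAtoms, int width, float* total) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < nAtoms) {
        float sum = 0.0f;
        for (int s = 0; s < countPerAtom[i]; s++) {
            sum += slots[s * width + i];   // same slot-major layout as d_angleForces
        }
        total[i] = sum;
    }
}

int main() {
    const int nAtoms = 2, width = nAtoms, nTerms = 3, maxSlots = 2;
    Term hTerms[nTerms] = {{0, 0, 1.0f}, {0, 1, 2.0f}, {1, 0, 3.0f}};
    int hCount[nAtoms] = {2, 1};

    Term* dTerms; float* dSlots; int* dCount; float* dTotal;
    cudaMalloc((void**)&dTerms, nTerms * sizeof(Term));
    cudaMalloc((void**)&dSlots, maxSlots * width * sizeof(float));
    cudaMalloc((void**)&dCount, nAtoms * sizeof(int));
    cudaMalloc((void**)&dTotal, nAtoms * sizeof(float));
    cudaMemcpy(dTerms, hTerms, nTerms * sizeof(Term), cudaMemcpyHostToDevice);
    cudaMemcpy(dCount, hCount, nAtoms * sizeof(int), cudaMemcpyHostToDevice);

    scatterKernel<<<1, 32>>>(dTerms, nTerms, dSlots, width);
    gatherKernel<<<1, 32>>>(dSlots, dCount, nAtoms, width, dTotal);

    float hTotal[nAtoms];
    cudaMemcpy(hTotal, dTotal, nAtoms * sizeof(float), cudaMemcpyDeviceToHost);
    printf("atom 0: %g, atom 1: %g\n", hTotal[0], hTotal[1]);  // expect 3 and 3
    cudaFree(dTerms); cudaFree(dSlots); cudaFree(dCount); cudaFree(dTotal);
    return 0;
}

The trade-off of this design is extra memory (width * maxTermsPerAtom slots) in exchange for deterministic, atomic-free force accumulation.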
1d17ea769f5952c7c8429c8e92eca0203ae17b25.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2017 XGBoost contributors */ #include <thrust/execution_policy.h> #include <thrust/reduce.h> #include <thrust/sequence.h> #include <xgboost/tree_updater.h> #include <algorithm> #include <memory> #include <queue> #include <utility> #include <vector> #include "../common/compressed_iterator.h" #include "../common/device_helpers.cuh" #include "../common/hist_util.h" #include "../common/timer.h" #include "param.h" #include "updater_gpu_common.cuh" namespace xgboost { namespace tree { DMLC_REGISTRY_FILE_TAG(updater_gpu_hist); typedef bst_gpair_precise gpair_sum_t; template <int BLOCK_THREADS, typename reduce_t, typename temp_storage_t> __device__ gpair_sum_t ReduceFeature(const gpair_sum_t* begin, const gpair_sum_t* end, temp_storage_t* temp_storage) { __shared__ cub::Uninitialized<gpair_sum_t> uninitialized_sum; gpair_sum_t& shared_sum = uninitialized_sum.Alias(); gpair_sum_t local_sum = gpair_sum_t(); for (auto itr = begin; itr < end; itr += BLOCK_THREADS) { bool thread_active = itr + threadIdx.x < end; // Scan histogram gpair_sum_t bin = thread_active ? *(itr + threadIdx.x) : gpair_sum_t(); local_sum += reduce_t(temp_storage->sum_reduce).Reduce(bin, hipcub::Sum()); } if (threadIdx.x == 0) { shared_sum = local_sum; } __syncthreads(); return shared_sum; } template <int BLOCK_THREADS, typename reduce_t, typename scan_t, typename max_reduce_t, typename temp_storage_t> __device__ void EvaluateFeature(int fidx, const gpair_sum_t* hist, const int* feature_segments, float min_fvalue, const float* gidx_fvalue_map, DeviceSplitCandidate* best_split, const DeviceNodeStats& node, const GPUTrainingParam& param, temp_storage_t* temp_storage, int constraint, const ValueConstraint& value_constraint) { int gidx_begin = feature_segments[fidx]; int gidx_end = feature_segments[fidx + 1]; gpair_sum_t feature_sum = ReduceFeature<BLOCK_THREADS, reduce_t>( hist + gidx_begin, hist + gidx_end, temp_storage); auto prefix_op = SumCallbackOp<gpair_sum_t>(); for (int scan_begin = gidx_begin; scan_begin < gidx_end; scan_begin += BLOCK_THREADS) { bool thread_active = scan_begin + threadIdx.x < gidx_end; gpair_sum_t bin = thread_active ? hist[scan_begin + threadIdx.x] : gpair_sum_t(); scan_t(temp_storage->scan).ExclusiveScan(bin, bin, hipcub::Sum(), prefix_op); // Calculate gain gpair_sum_t parent_sum = gpair_sum_t(node.sum_gradients); gpair_sum_t missing = parent_sum - feature_sum; bool missing_left = true; const float null_gain = -FLT_MAX; float gain = null_gain; if (thread_active) { gain = loss_chg_missing(bin, missing, parent_sum, node.root_gain, param, constraint, value_constraint, missing_left); } __syncthreads(); // Find thread with best gain hipcub::KeyValuePair<int, float> tuple(threadIdx.x, gain); hipcub::KeyValuePair<int, float> best = max_reduce_t(temp_storage->max_reduce).Reduce(tuple, hipcub::ArgMax()); __shared__ hipcub::KeyValuePair<int, float> block_max; if (threadIdx.x == 0) { block_max = best; } __syncthreads(); // Best thread updates split if (threadIdx.x == block_max.key) { int gidx = scan_begin + threadIdx.x; float fvalue = gidx == gidx_begin ? min_fvalue : gidx_fvalue_map[gidx - 1]; gpair_sum_t left = missing_left ? bin + missing : bin; gpair_sum_t right = parent_sum - left; best_split->Update(gain, missing_left ? 
LeftDir : RightDir, fvalue, fidx, left, right, param); } __syncthreads(); } } template <int BLOCK_THREADS> __global__ void evaluate_split_kernel( const gpair_sum_t* d_hist, int nidx, uint64_t n_features, DeviceNodeStats nodes, const int* d_feature_segments, const float* d_fidx_min_map, const float* d_gidx_fvalue_map, GPUTrainingParam gpu_param, DeviceSplitCandidate* d_split, ValueConstraint value_constraint, int* d_monotonic_constraints) { typedef hipcub::KeyValuePair<int, float> ArgMaxT; typedef hipcub::BlockScan<gpair_sum_t, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS> BlockScanT; typedef hipcub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT; typedef hipcub::BlockReduce<gpair_sum_t, BLOCK_THREADS> SumReduceT; union TempStorage { typename BlockScanT::TempStorage scan; typename MaxReduceT::TempStorage max_reduce; typename SumReduceT::TempStorage sum_reduce; }; __shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split; DeviceSplitCandidate& best_split = uninitialized_split.Alias(); __shared__ TempStorage temp_storage; if (threadIdx.x == 0) { best_split = DeviceSplitCandidate(); } __syncthreads(); auto fidx = blockIdx.x; auto constraint = d_monotonic_constraints[fidx]; EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>( fidx, d_hist, d_feature_segments, d_fidx_min_map[fidx], d_gidx_fvalue_map, &best_split, nodes, gpu_param, &temp_storage, constraint, value_constraint); __syncthreads(); if (threadIdx.x == 0) { // Record best loss d_split[fidx] = best_split; } } // Find a gidx value for a given feature otherwise return -1 if not found template <typename gidx_iter_t> __device__ int BinarySearchRow(bst_uint begin, bst_uint end, gidx_iter_t data, int fidx_begin, int fidx_end) { bst_uint previous_middle = UINT32_MAX; while (end != begin) { auto middle = begin + (end - begin) / 2; if (middle == previous_middle) { break; } previous_middle = middle; auto gidx = data[middle]; if (gidx >= fidx_begin && gidx < fidx_end) { return gidx; } else if (gidx < fidx_begin) { begin = middle; } else { end = middle; } } // Value is missing return -1; } struct DeviceHistogram { dh::bulk_allocator<dh::memory_type::DEVICE> ba; dh::dvec<gpair_sum_t> data; int n_bins; void Init(int device_idx, int max_nodes, int n_bins, bool silent) { this->n_bins = n_bins; ba.allocate(device_idx, silent, &data, size_t(max_nodes) * size_t(n_bins)); } void Reset() { data.fill(gpair_sum_t()); } gpair_sum_t* GetHistPtr(int nidx) { return data.data() + nidx * n_bins; } void PrintNidx(int nidx) const { auto h_data = data.as_vector(); std::cout << "nidx " << nidx << ":\n"; for (int i = n_bins * nidx; i < n_bins * (nidx + 1); i++) { std::cout << h_data[i] << " "; } std::cout << "\n"; } }; // Manage memory for a single GPU struct DeviceShard { struct Segment { size_t begin; size_t end; Segment() : begin(0), end(0) {} Segment(size_t begin, size_t end) : begin(begin), end(end) { CHECK_GE(end, begin); } size_t Size() const { return end - begin; } }; int device_idx; int normalised_device_idx; // Device index counting from param.gpu_id dh::bulk_allocator<dh::memory_type::DEVICE> ba; dh::dvec<common::compressed_byte_t> gidx_buffer; dh::dvec<bst_gpair> gpair; dh::dvec2<bst_uint> ridx; // Row index relative to this shard dh::dvec2<int> position; std::vector<Segment> ridx_segments; dh::dvec<int> feature_segments; dh::dvec<float> gidx_fvalue_map; dh::dvec<float> min_fvalue; dh::dvec<int> monotone_constraints; std::vector<bst_gpair> node_sum_gradients; common::CompressedIterator<uint32_t> gidx; int row_stride; bst_uint 
row_begin_idx; // The row offset for this shard bst_uint row_end_idx; bst_uint n_rows; int n_bins; int null_gidx_value; DeviceHistogram hist; TrainParam param; int64_t* tmp_pinned; // Small amount of staging memory std::vector<hipStream_t> streams; dh::CubMemory temp_memory; DeviceShard(int device_idx, int normalised_device_idx, const common::GHistIndexMatrix& gmat, bst_uint row_begin, bst_uint row_end, int n_bins, TrainParam param) : device_idx(device_idx), normalised_device_idx(normalised_device_idx), row_begin_idx(row_begin), row_end_idx(row_end), n_rows(row_end - row_begin), n_bins(n_bins), null_gidx_value(n_bins), param(param) { // Convert to ELLPACK matrix representation int max_elements_row = 0; for (auto i = row_begin; i < row_end; i++) { max_elements_row = (std::max)(max_elements_row, static_cast<int>(gmat.row_ptr[i + 1] - gmat.row_ptr[i])); } row_stride = max_elements_row; std::vector<int> ellpack_matrix(row_stride * n_rows, null_gidx_value); for (auto i = row_begin; i < row_end; i++) { int row_count = 0; for (auto j = gmat.row_ptr[i]; j < gmat.row_ptr[i + 1]; j++) { ellpack_matrix[(i - row_begin) * row_stride + row_count] = gmat.index[j]; row_count++; } } // Allocate int num_symbols = n_bins + 1; size_t compressed_size_bytes = common::CompressedBufferWriter::CalculateBufferSize( ellpack_matrix.size(), num_symbols); CHECK(!(param.max_leaves == 0 && param.max_depth == 0)) << "Max leaves and max depth cannot both be unconstrained for " "gpu_hist."; int max_nodes = param.max_leaves > 0 ? param.max_leaves * 2 : n_nodes(param.max_depth); ba.allocate(device_idx, param.silent, &gidx_buffer, compressed_size_bytes, &gpair, n_rows, &ridx, n_rows, &position, n_rows, &feature_segments, gmat.cut->row_ptr.size(), &gidx_fvalue_map, gmat.cut->cut.size(), &min_fvalue, gmat.cut->min_val.size(), &monotone_constraints, param.monotone_constraints.size()); gidx_fvalue_map = gmat.cut->cut; min_fvalue = gmat.cut->min_val; feature_segments = gmat.cut->row_ptr; monotone_constraints = param.monotone_constraints; node_sum_gradients.resize(max_nodes); ridx_segments.resize(max_nodes); // Compress gidx common::CompressedBufferWriter cbw(num_symbols); std::vector<common::compressed_byte_t> host_buffer(gidx_buffer.size()); cbw.Write(host_buffer.data(), ellpack_matrix.begin(), ellpack_matrix.end()); gidx_buffer = host_buffer; gidx = common::CompressedIterator<uint32_t>(gidx_buffer.data(), num_symbols); common::CompressedIterator<uint32_t> ci_host(host_buffer.data(), num_symbols); // Init histogram hist.Init(device_idx, max_nodes, gmat.cut->row_ptr.back(), param.silent); dh::safe_cuda(hipHostMalloc(&tmp_pinned, sizeof(int64_t))); } ~DeviceShard() { for (auto& stream : streams) { dh::safe_cuda(hipStreamDestroy(stream)); } dh::safe_cuda(hipHostFree(tmp_pinned)); } // Get vector of at least n initialised streams std::vector<hipStream_t>& GetStreams(int n) { if (n > streams.size()) { for (auto& stream : streams) { dh::safe_cuda(hipStreamDestroy(stream)); } streams.clear(); streams.resize(n); for (auto& stream : streams) { dh::safe_cuda(hipStreamCreate(&stream)); } } return streams; } // Reset values for each update iteration void Reset(const std::vector<bst_gpair>& host_gpair) { dh::safe_cuda(hipSetDevice(device_idx)); position.current_dvec().fill(0); std::fill(node_sum_gradients.begin(), node_sum_gradients.end(), bst_gpair()); thrust::sequence(ridx.current_dvec().tbegin(), ridx.current_dvec().tend()); std::fill(ridx_segments.begin(), ridx_segments.end(), Segment(0, 0)); ridx_segments.front() = Segment(0, 
ridx.size()); this->gpair.copy(host_gpair.begin() + row_begin_idx, host_gpair.begin() + row_end_idx); subsample_gpair(&gpair, param.subsample, row_begin_idx); hist.Reset(); } void BuildHist(int nidx) { auto segment = ridx_segments[nidx]; auto d_node_hist = hist.GetHistPtr(nidx); auto d_gidx = gidx; auto d_ridx = ridx.current(); auto d_gpair = gpair.data(); auto row_stride = this->row_stride; auto null_gidx_value = this->null_gidx_value; auto n_elements = segment.Size() * row_stride; dh::launch_n(device_idx, n_elements, [=] __device__(size_t idx) { int ridx = d_ridx[(idx / row_stride) + segment.begin]; int gidx = d_gidx[ridx * row_stride + idx % row_stride]; if (gidx != null_gidx_value) { AtomicAddGpair(d_node_hist + gidx, d_gpair[ridx]); } }); } void SubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) { auto d_node_hist_parent = hist.GetHistPtr(nidx_parent); auto d_node_hist_histogram = hist.GetHistPtr(nidx_histogram); auto d_node_hist_subtraction = hist.GetHistPtr(nidx_subtraction); dh::launch_n(device_idx, hist.n_bins, [=] __device__(size_t idx) { d_node_hist_subtraction[idx] = d_node_hist_parent[idx] - d_node_hist_histogram[idx]; }); } __device__ void CountLeft(int64_t* d_count, int val, int left_nidx) { unsigned ballot = __ballot(val == left_nidx); if (threadIdx.x % 32 == 0) { atomicAdd(reinterpret_cast<unsigned long long*>(d_count), // NOLINT static_cast<unsigned long long>(__popc(ballot))); // NOLINT } } void UpdatePosition(int nidx, int left_nidx, int right_nidx, int fidx, int split_gidx, bool default_dir_left, bool is_dense, int fidx_begin, int fidx_end) { dh::safe_cuda(hipSetDevice(device_idx)); temp_memory.LazyAllocate(sizeof(int64_t)); auto d_left_count = temp_memory.Pointer<int64_t>(); dh::safe_cuda(hipMemset(d_left_count, 0, sizeof(int64_t))); auto segment = ridx_segments[nidx]; auto d_ridx = ridx.current(); auto d_position = position.current(); auto d_gidx = gidx; auto row_stride = this->row_stride; dh::launch_n<1, 512>( device_idx, segment.Size(), [=] __device__(bst_uint idx) { idx += segment.begin; auto ridx = d_ridx[idx]; auto row_begin = row_stride * ridx; auto row_end = row_begin + row_stride; auto gidx = -1; if (is_dense) { gidx = d_gidx[row_begin + fidx]; } else { gidx = BinarySearchRow(row_begin, row_end, d_gidx, fidx_begin, fidx_end); } int position; if (gidx >= 0) { // Feature is found position = gidx <= split_gidx ? left_nidx : right_nidx; } else { // Feature is missing position = default_dir_left ? 
left_nidx : right_nidx; } CountLeft(d_left_count, position, left_nidx); d_position[idx] = position; }); dh::safe_cuda(hipMemcpy(tmp_pinned, d_left_count, sizeof(int64_t), hipMemcpyDeviceToHost)); auto left_count = *tmp_pinned; SortPosition(segment, left_nidx, right_nidx); // dh::safe_cuda(hipStreamSynchronize(stream)); ridx_segments[left_nidx] = Segment(segment.begin, segment.begin + left_count); ridx_segments[right_nidx] = Segment(segment.begin + left_count, segment.end); } void SortPosition(const Segment& segment, int left_nidx, int right_nidx) { int min_bits = 0; int max_bits = static_cast<int>( ::ceil(std::log2((std::max)(left_nidx, right_nidx) + 1))); size_t temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, position.current() + segment.begin, position.other() + segment.begin, ridx.current() + segment.begin, ridx.other() + segment.begin, segment.Size(), min_bits, max_bits); temp_memory.LazyAllocate(temp_storage_bytes); hipcub::DeviceRadixSort::SortPairs( temp_memory.d_temp_storage, temp_memory.temp_storage_bytes, position.current() + segment.begin, position.other() + segment.begin, ridx.current() + segment.begin, ridx.other() + segment.begin, segment.Size(), min_bits, max_bits); dh::safe_cuda(hipMemcpy( position.current() + segment.begin, position.other() + segment.begin, segment.Size() * sizeof(int), hipMemcpyDeviceToDevice)); dh::safe_cuda(hipMemcpy( ridx.current() + segment.begin, ridx.other() + segment.begin, segment.Size() * sizeof(bst_uint), hipMemcpyDeviceToDevice)); } }; class GPUHistMaker : public TreeUpdater { public: struct ExpandEntry; GPUHistMaker() : initialised(false) {} ~GPUHistMaker() {} void Init( const std::vector<std::pair<std::string, std::string>>& args) override { param.InitAllowUnknown(args); CHECK(param.n_gpus != 0) << "Must have at least one device"; n_devices = param.n_gpus; dh::check_compute_capability(); if (param.grow_policy == TrainParam::kLossGuide) { qexpand_.reset(new ExpandQueue(loss_guide)); } else { qexpand_.reset(new ExpandQueue(depth_wise)); } monitor.Init("updater_gpu_hist", param.debug_verbose); } void Update(const std::vector<bst_gpair>& gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) override { monitor.Start("Update"); GradStats::CheckInfo(dmat->info()); // rescale learning rate according to size of trees float lr = param.learning_rate; param.learning_rate = lr / trees.size(); ValueConstraint::Init(&param, dmat->info().num_col); // build tree try { for (size_t i = 0; i < trees.size(); ++i) { this->UpdateTree(gpair, dmat, trees[i]); } } catch (const std::exception& e) { LOG(FATAL) << "GPU plugin exception: " << e.what() << std::endl; } param.learning_rate = lr; monitor.Stop("Update"); } void InitDataOnce(DMatrix* dmat) { info = &dmat->info(); monitor.Start("Quantiles"); hmat_.Init(dmat, param.max_bin); gmat_.cut = &hmat_; gmat_.Init(dmat); monitor.Stop("Quantiles"); n_bins = hmat_.row_ptr.back(); int n_devices = dh::n_devices(param.n_gpus, info->num_row); bst_uint row_begin = 0; bst_uint shard_size = ::ceil(static_cast<double>(info->num_row) / n_devices); std::vector<int> dList(n_devices); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { int device_idx = (param.gpu_id + d_idx) % dh::n_visible_devices(); dList[d_idx] = device_idx; } reducer.Init(dList); // Partition input matrix into row segments std::vector<size_t> row_segments; shards.resize(n_devices); row_segments.push_back(0); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { bst_uint row_end = ::min(static_cast<size_t>(row_begin + 
shard_size), info->num_row); row_segments.push_back(row_end); row_begin = row_end; } // Create device shards omp_set_num_threads(shards.size()); #pragma omp parallel { auto cpu_thread_id = omp_get_thread_num(); shards[cpu_thread_id] = std::unique_ptr<DeviceShard>( new DeviceShard(dList[cpu_thread_id], cpu_thread_id, gmat_, row_segments[cpu_thread_id], row_segments[cpu_thread_id + 1], n_bins, param)); } initialised = true; } void InitData(const std::vector<bst_gpair>& gpair, DMatrix* dmat, const RegTree& tree) { monitor.Start("InitDataOnce"); if (!initialised) { this->InitDataOnce(dmat); } monitor.Stop("InitDataOnce"); column_sampler.Init(info->num_col, param); // Copy gpair & reset memory monitor.Start("InitDataReset"); omp_set_num_threads(shards.size()); #pragma omp parallel { auto cpu_thread_id = omp_get_thread_num(); shards[cpu_thread_id]->Reset(gpair); } monitor.Stop("InitDataReset"); } void AllReduceHist(int nidx) { for (auto& shard : shards) { auto d_node_hist = shard->hist.GetHistPtr(nidx); reducer.AllReduceSum( shard->normalised_device_idx, reinterpret_cast<gpair_sum_t::value_t*>(d_node_hist), reinterpret_cast<gpair_sum_t::value_t*>(d_node_hist), n_bins * (sizeof(gpair_sum_t) / sizeof(gpair_sum_t::value_t))); } reducer.Synchronize(); } void BuildHistLeftRight(int nidx_parent, int nidx_left, int nidx_right) { size_t left_node_max_elements = 0; size_t right_node_max_elements = 0; for (auto& shard : shards) { left_node_max_elements = (std::max)( left_node_max_elements, shard->ridx_segments[nidx_left].Size()); right_node_max_elements = (std::max)( right_node_max_elements, shard->ridx_segments[nidx_right].Size()); } auto build_hist_nidx = nidx_left; auto subtraction_trick_nidx = nidx_right; if (right_node_max_elements < left_node_max_elements) { build_hist_nidx = nidx_right; subtraction_trick_nidx = nidx_left; } for (auto& shard : shards) { shard->BuildHist(build_hist_nidx); } this->AllReduceHist(build_hist_nidx); for (auto& shard : shards) { shard->SubtractionTrick(nidx_parent, build_hist_nidx, subtraction_trick_nidx); } } // Returns best loss std::vector<DeviceSplitCandidate> EvaluateSplits( const std::vector<int>& nidx_set, RegTree* p_tree) { auto columns = info->num_col; std::vector<DeviceSplitCandidate> best_splits(nidx_set.size()); std::vector<DeviceSplitCandidate> candidate_splits(nidx_set.size() * columns); // Use first device auto& shard = shards.front(); dh::safe_cuda(hipSetDevice(shard->device_idx)); shard->temp_memory.LazyAllocate(sizeof(DeviceSplitCandidate) * columns * nidx_set.size()); auto d_split = shard->temp_memory.Pointer<DeviceSplitCandidate>(); auto& streams = shard->GetStreams(static_cast<int>(nidx_set.size())); // Use streams to process nodes concurrently for (auto i = 0; i < nidx_set.size(); i++) { auto nidx = nidx_set[i]; DeviceNodeStats node(shard->node_sum_gradients[nidx], nidx, param); const int BLOCK_THREADS = 256; hipLaunchKernelGGL(( evaluate_split_kernel<BLOCK_THREADS>) , dim3(uint32_t(columns)), dim3(BLOCK_THREADS), 0, streams[i], shard->hist.GetHistPtr(nidx), nidx, info->num_col, node, shard->feature_segments.data(), shard->min_fvalue.data(), shard->gidx_fvalue_map.data(), GPUTrainingParam(param), d_split + i * columns, node_value_constraints_[nidx], shard->monotone_constraints.data()); } dh::safe_cuda( hipMemcpy(candidate_splits.data(), shard->temp_memory.d_temp_storage, sizeof(DeviceSplitCandidate) * columns * nidx_set.size(), hipMemcpyDeviceToHost)); for (auto i = 0; i < nidx_set.size(); i++) { auto nidx = nidx_set[i]; DeviceSplitCandidate 
nidx_best; for (auto fidx = 0; fidx < columns; fidx++) { auto& candidate = candidate_splits[i * columns + fidx]; if (column_sampler.ColumnUsed(candidate.findex, p_tree->GetDepth(nidx))) { nidx_best.Update(candidate_splits[i * columns + fidx], param); } } best_splits[i] = nidx_best; } return std::move(best_splits); } void InitRoot(const std::vector<bst_gpair>& gpair, RegTree* p_tree) { auto root_nidx = 0; // Sum gradients std::vector<bst_gpair> tmp_sums(shards.size()); omp_set_num_threads(shards.size()); #pragma omp parallel { auto cpu_thread_id = omp_get_thread_num(); dh::safe_cuda(hipSetDevice(shards[cpu_thread_id]->device_idx)); tmp_sums[cpu_thread_id] = thrust::reduce(thrust::hip::par(shards[cpu_thread_id]->temp_memory), shards[cpu_thread_id]->gpair.tbegin(), shards[cpu_thread_id]->gpair.tend()); } auto sum_gradient = std::accumulate(tmp_sums.begin(), tmp_sums.end(), bst_gpair()); // Generate root histogram for (auto& shard : shards) { shard->BuildHist(root_nidx); } this->AllReduceHist(root_nidx); // Remember root stats p_tree->stat(root_nidx).sum_hess = sum_gradient.GetHess(); p_tree->stat(root_nidx).base_weight = CalcWeight(param, sum_gradient); // Store sum gradients for (auto& shard : shards) { shard->node_sum_gradients[root_nidx] = sum_gradient; } // Initialise root constraint node_value_constraints_.resize(p_tree->GetNodes().size()); // Generate first split auto splits = this->EvaluateSplits({root_nidx}, p_tree); qexpand_->push( ExpandEntry(root_nidx, p_tree->GetDepth(root_nidx), splits.front(), 0)); } void UpdatePosition(const ExpandEntry& candidate, RegTree* p_tree) { auto nidx = candidate.nid; auto left_nidx = (*p_tree)[nidx].cleft(); auto right_nidx = (*p_tree)[nidx].cright(); // convert floating-point split_pt into corresponding bin_id // split_cond = -1 indicates that split_pt is less than all known cut points auto split_gidx = -1; auto fidx = candidate.split.findex; auto default_dir_left = candidate.split.dir == LeftDir; auto fidx_begin = hmat_.row_ptr[fidx]; auto fidx_end = hmat_.row_ptr[fidx + 1]; for (auto i = fidx_begin; i < fidx_end; ++i) { if (candidate.split.fvalue == hmat_.cut[i]) { split_gidx = static_cast<int32_t>(i); } } auto is_dense = info->num_nonzero == info->num_row * info->num_col; omp_set_num_threads(shards.size()); #pragma omp parallel { auto cpu_thread_id = omp_get_thread_num(); shards[cpu_thread_id]->UpdatePosition(nidx, left_nidx, right_nidx, fidx, split_gidx, default_dir_left, is_dense, fidx_begin, fidx_end); } } void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) { // Add new leaves RegTree& tree = *p_tree; tree.AddChilds(candidate.nid); auto& parent = tree[candidate.nid]; parent.set_split(candidate.split.findex, candidate.split.fvalue, candidate.split.dir == LeftDir); tree.stat(candidate.nid).loss_chg = candidate.split.loss_chg; // Set up child constraints node_value_constraints_.resize(tree.GetNodes().size()); GradStats left_stats(param); left_stats.Add(candidate.split.left_sum); GradStats right_stats(param); right_stats.Add(candidate.split.right_sum); node_value_constraints_[candidate.nid].SetChild( param, parent.split_index(), left_stats, right_stats, &node_value_constraints_[parent.cleft()], &node_value_constraints_[parent.cright()]); // Configure left child auto left_weight = node_value_constraints_[parent.cleft()].CalcWeight(param, left_stats); tree[parent.cleft()].set_leaf(left_weight * param.learning_rate, 0); tree.stat(parent.cleft()).base_weight = left_weight; tree.stat(parent.cleft()).sum_hess = 
candidate.split.left_sum.GetHess(); // Configure right child auto right_weight = node_value_constraints_[parent.cright()].CalcWeight(param, right_stats); tree[parent.cright()].set_leaf(right_weight * param.learning_rate, 0); tree.stat(parent.cright()).base_weight = right_weight; tree.stat(parent.cright()).sum_hess = candidate.split.right_sum.GetHess(); // Store sum gradients for (auto& shard : shards) { shard->node_sum_gradients[parent.cleft()] = candidate.split.left_sum; shard->node_sum_gradients[parent.cright()] = candidate.split.right_sum; } this->UpdatePosition(candidate, p_tree); } void UpdateTree(const std::vector<bst_gpair>& gpair, DMatrix* p_fmat, RegTree* p_tree) { // Temporarily store number of threads so we can change it back later int nthread = omp_get_max_threads(); auto& tree = *p_tree; monitor.Start("InitData"); this->InitData(gpair, p_fmat, *p_tree); monitor.Stop("InitData"); monitor.Start("InitRoot"); this->InitRoot(gpair, p_tree); monitor.Stop("InitRoot"); auto timestamp = qexpand_->size(); auto num_leaves = 1; while (!qexpand_->empty()) { auto candidate = qexpand_->top(); qexpand_->pop(); if (!candidate.IsValid(param, num_leaves)) continue; // std::cout << candidate; monitor.Start("ApplySplit"); this->ApplySplit(candidate, p_tree); monitor.Stop("ApplySplit"); num_leaves++; auto left_child_nidx = tree[candidate.nid].cleft(); auto right_child_nidx = tree[candidate.nid].cright(); // Only create child entries if needed if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx), num_leaves)) { monitor.Start("BuildHist"); this->BuildHistLeftRight(candidate.nid, left_child_nidx, right_child_nidx); monitor.Stop("BuildHist"); monitor.Start("EvaluateSplits"); auto splits = this->EvaluateSplits({left_child_nidx, right_child_nidx}, p_tree); qexpand_->push(ExpandEntry(left_child_nidx, tree.GetDepth(left_child_nidx), splits[0], timestamp++)); qexpand_->push(ExpandEntry(right_child_nidx, tree.GetDepth(right_child_nidx), splits[1], timestamp++)); monitor.Stop("EvaluateSplits"); } } // Reset omp num threads omp_set_num_threads(nthread); } struct ExpandEntry { int nid; int depth; DeviceSplitCandidate split; uint64_t timestamp; ExpandEntry(int nid, int depth, const DeviceSplitCandidate& split, uint64_t timestamp) : nid(nid), depth(depth), split(split), timestamp(timestamp) {} bool IsValid(const TrainParam& param, int num_leaves) const { if (split.loss_chg <= rt_eps) return false; if (param.max_depth > 0 && depth == param.max_depth) return false; if (param.max_leaves > 0 && num_leaves == param.max_leaves) return false; return true; } static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) { if (param.max_depth > 0 && depth == param.max_depth) return false; if (param.max_leaves > 0 && num_leaves == param.max_leaves) return false; return true; } friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) { os << "ExpandEntry: \n"; os << "nidx: " << e.nid << "\n"; os << "depth: " << e.depth << "\n"; os << "loss: " << e.split.loss_chg << "\n"; os << "left_sum: " << e.split.left_sum << "\n"; os << "right_sum: " << e.split.right_sum << "\n"; return os; } }; inline static bool depth_wise(ExpandEntry lhs, ExpandEntry rhs) { if (lhs.depth == rhs.depth) { return lhs.timestamp > rhs.timestamp; // favor small timestamp } else { return lhs.depth > rhs.depth; // favor small depth } } inline static bool loss_guide(ExpandEntry lhs, ExpandEntry rhs) { if (lhs.split.loss_chg == rhs.split.loss_chg) { return lhs.timestamp > rhs.timestamp; // favor small timestamp } 
else { return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg } } TrainParam param; common::HistCutMatrix hmat_; common::GHistIndexMatrix gmat_; MetaInfo* info; bool initialised; int n_devices; int n_bins; std::vector<std::unique_ptr<DeviceShard>> shards; ColumnSampler column_sampler; typedef std::priority_queue<ExpandEntry, std::vector<ExpandEntry>, std::function<bool(ExpandEntry, ExpandEntry)>> ExpandQueue; std::unique_ptr<ExpandQueue> qexpand_; common::Monitor monitor; dh::AllReducer reducer; std::vector<ValueConstraint> node_value_constraints_; }; XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist") .describe("Grow tree with GPU.") .set_body([]() { return new GPUHistMaker(); }); } // namespace tree } // namespace xgboost
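BuildHistLeftRight in the updater above builds a gradient histogram only for the child node with fewer rows and derives the sibling with SubtractionTrick, i.e. hist[sibling][bin] = hist[parent][bin] - hist[built_child][bin]. The following is a minimal host-side sketch of that idea, assuming an invented GradPair in place of gpair_sum_t and plain vectors in place of the device histograms:

// Host-side sketch of the histogram subtraction trick (illustrative only).
#include <cstdio>
#include <vector>

struct GradPair { double grad = 0.0, hess = 0.0; };

int main() {
    const int nBins = 4;
    // Toy rows: (bin, grad, hess) plus which child each row was routed to.
    struct Row { int bin; double grad, hess; bool left; };
    std::vector<Row> rows = {
        {0, 0.5, 1.0, true}, {1, -0.2, 1.0, false},
        {2, 0.3, 1.0, true}, {1, 0.1, 1.0, true}, {3, -0.4, 1.0, false}};

    std::vector<GradPair> parent(nBins), left(nBins), right(nBins);
    for (const Row& r : rows) {
        parent[r.bin].grad += r.grad; parent[r.bin].hess += r.hess;
        if (r.left) { left[r.bin].grad += r.grad; left[r.bin].hess += r.hess; }
    }
    // The sibling comes for free: one pass over bins instead of another pass over rows.
    for (int b = 0; b < nBins; b++) {
        right[b].grad = parent[b].grad - left[b].grad;
        right[b].hess = parent[b].hess - left[b].hess;
    }
    for (int b = 0; b < nBins; b++) {
        printf("bin %d: right = (%.1f, %.1f)\n", b, right[b].grad, right[b].hess);
    }
    return 0;
}

On the GPU the subtraction pass costs O(n_bins) per node rather than another pass over the training rows, which is why BuildHistLeftRight deliberately picks the child with fewer rows for the explicit histogram build.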
1d17ea769f5952c7c8429c8e92eca0203ae17b25.cu
/*! * Copyright 2017 XGBoost contributors */ #include <thrust/execution_policy.h> #include <thrust/reduce.h> #include <thrust/sequence.h> #include <xgboost/tree_updater.h> #include <algorithm> #include <memory> #include <queue> #include <utility> #include <vector> #include "../common/compressed_iterator.h" #include "../common/device_helpers.cuh" #include "../common/hist_util.h" #include "../common/timer.h" #include "param.h" #include "updater_gpu_common.cuh" namespace xgboost { namespace tree { DMLC_REGISTRY_FILE_TAG(updater_gpu_hist); typedef bst_gpair_precise gpair_sum_t; template <int BLOCK_THREADS, typename reduce_t, typename temp_storage_t> __device__ gpair_sum_t ReduceFeature(const gpair_sum_t* begin, const gpair_sum_t* end, temp_storage_t* temp_storage) { __shared__ cub::Uninitialized<gpair_sum_t> uninitialized_sum; gpair_sum_t& shared_sum = uninitialized_sum.Alias(); gpair_sum_t local_sum = gpair_sum_t(); for (auto itr = begin; itr < end; itr += BLOCK_THREADS) { bool thread_active = itr + threadIdx.x < end; // Scan histogram gpair_sum_t bin = thread_active ? *(itr + threadIdx.x) : gpair_sum_t(); local_sum += reduce_t(temp_storage->sum_reduce).Reduce(bin, cub::Sum()); } if (threadIdx.x == 0) { shared_sum = local_sum; } __syncthreads(); return shared_sum; } template <int BLOCK_THREADS, typename reduce_t, typename scan_t, typename max_reduce_t, typename temp_storage_t> __device__ void EvaluateFeature(int fidx, const gpair_sum_t* hist, const int* feature_segments, float min_fvalue, const float* gidx_fvalue_map, DeviceSplitCandidate* best_split, const DeviceNodeStats& node, const GPUTrainingParam& param, temp_storage_t* temp_storage, int constraint, const ValueConstraint& value_constraint) { int gidx_begin = feature_segments[fidx]; int gidx_end = feature_segments[fidx + 1]; gpair_sum_t feature_sum = ReduceFeature<BLOCK_THREADS, reduce_t>( hist + gidx_begin, hist + gidx_end, temp_storage); auto prefix_op = SumCallbackOp<gpair_sum_t>(); for (int scan_begin = gidx_begin; scan_begin < gidx_end; scan_begin += BLOCK_THREADS) { bool thread_active = scan_begin + threadIdx.x < gidx_end; gpair_sum_t bin = thread_active ? hist[scan_begin + threadIdx.x] : gpair_sum_t(); scan_t(temp_storage->scan).ExclusiveScan(bin, bin, cub::Sum(), prefix_op); // Calculate gain gpair_sum_t parent_sum = gpair_sum_t(node.sum_gradients); gpair_sum_t missing = parent_sum - feature_sum; bool missing_left = true; const float null_gain = -FLT_MAX; float gain = null_gain; if (thread_active) { gain = loss_chg_missing(bin, missing, parent_sum, node.root_gain, param, constraint, value_constraint, missing_left); } __syncthreads(); // Find thread with best gain cub::KeyValuePair<int, float> tuple(threadIdx.x, gain); cub::KeyValuePair<int, float> best = max_reduce_t(temp_storage->max_reduce).Reduce(tuple, cub::ArgMax()); __shared__ cub::KeyValuePair<int, float> block_max; if (threadIdx.x == 0) { block_max = best; } __syncthreads(); // Best thread updates split if (threadIdx.x == block_max.key) { int gidx = scan_begin + threadIdx.x; float fvalue = gidx == gidx_begin ? min_fvalue : gidx_fvalue_map[gidx - 1]; gpair_sum_t left = missing_left ? bin + missing : bin; gpair_sum_t right = parent_sum - left; best_split->Update(gain, missing_left ? 
LeftDir : RightDir, fvalue, fidx, left, right, param); } __syncthreads(); } } template <int BLOCK_THREADS> __global__ void evaluate_split_kernel( const gpair_sum_t* d_hist, int nidx, uint64_t n_features, DeviceNodeStats nodes, const int* d_feature_segments, const float* d_fidx_min_map, const float* d_gidx_fvalue_map, GPUTrainingParam gpu_param, DeviceSplitCandidate* d_split, ValueConstraint value_constraint, int* d_monotonic_constraints) { typedef cub::KeyValuePair<int, float> ArgMaxT; typedef cub::BlockScan<gpair_sum_t, BLOCK_THREADS, cub::BLOCK_SCAN_WARP_SCANS> BlockScanT; typedef cub::BlockReduce<ArgMaxT, BLOCK_THREADS> MaxReduceT; typedef cub::BlockReduce<gpair_sum_t, BLOCK_THREADS> SumReduceT; union TempStorage { typename BlockScanT::TempStorage scan; typename MaxReduceT::TempStorage max_reduce; typename SumReduceT::TempStorage sum_reduce; }; __shared__ cub::Uninitialized<DeviceSplitCandidate> uninitialized_split; DeviceSplitCandidate& best_split = uninitialized_split.Alias(); __shared__ TempStorage temp_storage; if (threadIdx.x == 0) { best_split = DeviceSplitCandidate(); } __syncthreads(); auto fidx = blockIdx.x; auto constraint = d_monotonic_constraints[fidx]; EvaluateFeature<BLOCK_THREADS, SumReduceT, BlockScanT, MaxReduceT>( fidx, d_hist, d_feature_segments, d_fidx_min_map[fidx], d_gidx_fvalue_map, &best_split, nodes, gpu_param, &temp_storage, constraint, value_constraint); __syncthreads(); if (threadIdx.x == 0) { // Record best loss d_split[fidx] = best_split; } } // Find a gidx value for a given feature otherwise return -1 if not found template <typename gidx_iter_t> __device__ int BinarySearchRow(bst_uint begin, bst_uint end, gidx_iter_t data, int fidx_begin, int fidx_end) { bst_uint previous_middle = UINT32_MAX; while (end != begin) { auto middle = begin + (end - begin) / 2; if (middle == previous_middle) { break; } previous_middle = middle; auto gidx = data[middle]; if (gidx >= fidx_begin && gidx < fidx_end) { return gidx; } else if (gidx < fidx_begin) { begin = middle; } else { end = middle; } } // Value is missing return -1; } struct DeviceHistogram { dh::bulk_allocator<dh::memory_type::DEVICE> ba; dh::dvec<gpair_sum_t> data; int n_bins; void Init(int device_idx, int max_nodes, int n_bins, bool silent) { this->n_bins = n_bins; ba.allocate(device_idx, silent, &data, size_t(max_nodes) * size_t(n_bins)); } void Reset() { data.fill(gpair_sum_t()); } gpair_sum_t* GetHistPtr(int nidx) { return data.data() + nidx * n_bins; } void PrintNidx(int nidx) const { auto h_data = data.as_vector(); std::cout << "nidx " << nidx << ":\n"; for (int i = n_bins * nidx; i < n_bins * (nidx + 1); i++) { std::cout << h_data[i] << " "; } std::cout << "\n"; } }; // Manage memory for a single GPU struct DeviceShard { struct Segment { size_t begin; size_t end; Segment() : begin(0), end(0) {} Segment(size_t begin, size_t end) : begin(begin), end(end) { CHECK_GE(end, begin); } size_t Size() const { return end - begin; } }; int device_idx; int normalised_device_idx; // Device index counting from param.gpu_id dh::bulk_allocator<dh::memory_type::DEVICE> ba; dh::dvec<common::compressed_byte_t> gidx_buffer; dh::dvec<bst_gpair> gpair; dh::dvec2<bst_uint> ridx; // Row index relative to this shard dh::dvec2<int> position; std::vector<Segment> ridx_segments; dh::dvec<int> feature_segments; dh::dvec<float> gidx_fvalue_map; dh::dvec<float> min_fvalue; dh::dvec<int> monotone_constraints; std::vector<bst_gpair> node_sum_gradients; common::CompressedIterator<uint32_t> gidx; int row_stride; bst_uint row_begin_idx; // 
The row offset for this shard bst_uint row_end_idx; bst_uint n_rows; int n_bins; int null_gidx_value; DeviceHistogram hist; TrainParam param; int64_t* tmp_pinned; // Small amount of staging memory std::vector<cudaStream_t> streams; dh::CubMemory temp_memory; DeviceShard(int device_idx, int normalised_device_idx, const common::GHistIndexMatrix& gmat, bst_uint row_begin, bst_uint row_end, int n_bins, TrainParam param) : device_idx(device_idx), normalised_device_idx(normalised_device_idx), row_begin_idx(row_begin), row_end_idx(row_end), n_rows(row_end - row_begin), n_bins(n_bins), null_gidx_value(n_bins), param(param) { // Convert to ELLPACK matrix representation int max_elements_row = 0; for (auto i = row_begin; i < row_end; i++) { max_elements_row = (std::max)(max_elements_row, static_cast<int>(gmat.row_ptr[i + 1] - gmat.row_ptr[i])); } row_stride = max_elements_row; std::vector<int> ellpack_matrix(row_stride * n_rows, null_gidx_value); for (auto i = row_begin; i < row_end; i++) { int row_count = 0; for (auto j = gmat.row_ptr[i]; j < gmat.row_ptr[i + 1]; j++) { ellpack_matrix[(i - row_begin) * row_stride + row_count] = gmat.index[j]; row_count++; } } // Allocate int num_symbols = n_bins + 1; size_t compressed_size_bytes = common::CompressedBufferWriter::CalculateBufferSize( ellpack_matrix.size(), num_symbols); CHECK(!(param.max_leaves == 0 && param.max_depth == 0)) << "Max leaves and max depth cannot both be unconstrained for " "gpu_hist."; int max_nodes = param.max_leaves > 0 ? param.max_leaves * 2 : n_nodes(param.max_depth); ba.allocate(device_idx, param.silent, &gidx_buffer, compressed_size_bytes, &gpair, n_rows, &ridx, n_rows, &position, n_rows, &feature_segments, gmat.cut->row_ptr.size(), &gidx_fvalue_map, gmat.cut->cut.size(), &min_fvalue, gmat.cut->min_val.size(), &monotone_constraints, param.monotone_constraints.size()); gidx_fvalue_map = gmat.cut->cut; min_fvalue = gmat.cut->min_val; feature_segments = gmat.cut->row_ptr; monotone_constraints = param.monotone_constraints; node_sum_gradients.resize(max_nodes); ridx_segments.resize(max_nodes); // Compress gidx common::CompressedBufferWriter cbw(num_symbols); std::vector<common::compressed_byte_t> host_buffer(gidx_buffer.size()); cbw.Write(host_buffer.data(), ellpack_matrix.begin(), ellpack_matrix.end()); gidx_buffer = host_buffer; gidx = common::CompressedIterator<uint32_t>(gidx_buffer.data(), num_symbols); common::CompressedIterator<uint32_t> ci_host(host_buffer.data(), num_symbols); // Init histogram hist.Init(device_idx, max_nodes, gmat.cut->row_ptr.back(), param.silent); dh::safe_cuda(cudaMallocHost(&tmp_pinned, sizeof(int64_t))); } ~DeviceShard() { for (auto& stream : streams) { dh::safe_cuda(cudaStreamDestroy(stream)); } dh::safe_cuda(cudaFreeHost(tmp_pinned)); } // Get vector of at least n initialised streams std::vector<cudaStream_t>& GetStreams(int n) { if (n > streams.size()) { for (auto& stream : streams) { dh::safe_cuda(cudaStreamDestroy(stream)); } streams.clear(); streams.resize(n); for (auto& stream : streams) { dh::safe_cuda(cudaStreamCreate(&stream)); } } return streams; } // Reset values for each update iteration void Reset(const std::vector<bst_gpair>& host_gpair) { dh::safe_cuda(cudaSetDevice(device_idx)); position.current_dvec().fill(0); std::fill(node_sum_gradients.begin(), node_sum_gradients.end(), bst_gpair()); thrust::sequence(ridx.current_dvec().tbegin(), ridx.current_dvec().tend()); std::fill(ridx_segments.begin(), ridx_segments.end(), Segment(0, 0)); ridx_segments.front() = Segment(0, ridx.size()); 
this->gpair.copy(host_gpair.begin() + row_begin_idx, host_gpair.begin() + row_end_idx); subsample_gpair(&gpair, param.subsample, row_begin_idx); hist.Reset(); } void BuildHist(int nidx) { auto segment = ridx_segments[nidx]; auto d_node_hist = hist.GetHistPtr(nidx); auto d_gidx = gidx; auto d_ridx = ridx.current(); auto d_gpair = gpair.data(); auto row_stride = this->row_stride; auto null_gidx_value = this->null_gidx_value; auto n_elements = segment.Size() * row_stride; dh::launch_n(device_idx, n_elements, [=] __device__(size_t idx) { int ridx = d_ridx[(idx / row_stride) + segment.begin]; int gidx = d_gidx[ridx * row_stride + idx % row_stride]; if (gidx != null_gidx_value) { AtomicAddGpair(d_node_hist + gidx, d_gpair[ridx]); } }); } void SubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) { auto d_node_hist_parent = hist.GetHistPtr(nidx_parent); auto d_node_hist_histogram = hist.GetHistPtr(nidx_histogram); auto d_node_hist_subtraction = hist.GetHistPtr(nidx_subtraction); dh::launch_n(device_idx, hist.n_bins, [=] __device__(size_t idx) { d_node_hist_subtraction[idx] = d_node_hist_parent[idx] - d_node_hist_histogram[idx]; }); } __device__ void CountLeft(int64_t* d_count, int val, int left_nidx) { unsigned ballot = __ballot(val == left_nidx); if (threadIdx.x % 32 == 0) { atomicAdd(reinterpret_cast<unsigned long long*>(d_count), // NOLINT static_cast<unsigned long long>(__popc(ballot))); // NOLINT } } void UpdatePosition(int nidx, int left_nidx, int right_nidx, int fidx, int split_gidx, bool default_dir_left, bool is_dense, int fidx_begin, int fidx_end) { dh::safe_cuda(cudaSetDevice(device_idx)); temp_memory.LazyAllocate(sizeof(int64_t)); auto d_left_count = temp_memory.Pointer<int64_t>(); dh::safe_cuda(cudaMemset(d_left_count, 0, sizeof(int64_t))); auto segment = ridx_segments[nidx]; auto d_ridx = ridx.current(); auto d_position = position.current(); auto d_gidx = gidx; auto row_stride = this->row_stride; dh::launch_n<1, 512>( device_idx, segment.Size(), [=] __device__(bst_uint idx) { idx += segment.begin; auto ridx = d_ridx[idx]; auto row_begin = row_stride * ridx; auto row_end = row_begin + row_stride; auto gidx = -1; if (is_dense) { gidx = d_gidx[row_begin + fidx]; } else { gidx = BinarySearchRow(row_begin, row_end, d_gidx, fidx_begin, fidx_end); } int position; if (gidx >= 0) { // Feature is found position = gidx <= split_gidx ? left_nidx : right_nidx; } else { // Feature is missing position = default_dir_left ? 
left_nidx : right_nidx; } CountLeft(d_left_count, position, left_nidx); d_position[idx] = position; }); dh::safe_cuda(cudaMemcpy(tmp_pinned, d_left_count, sizeof(int64_t), cudaMemcpyDeviceToHost)); auto left_count = *tmp_pinned; SortPosition(segment, left_nidx, right_nidx); // dh::safe_cuda(cudaStreamSynchronize(stream)); ridx_segments[left_nidx] = Segment(segment.begin, segment.begin + left_count); ridx_segments[right_nidx] = Segment(segment.begin + left_count, segment.end); } void SortPosition(const Segment& segment, int left_nidx, int right_nidx) { int min_bits = 0; int max_bits = static_cast<int>( std::ceil(std::log2((std::max)(left_nidx, right_nidx) + 1))); size_t temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, position.current() + segment.begin, position.other() + segment.begin, ridx.current() + segment.begin, ridx.other() + segment.begin, segment.Size(), min_bits, max_bits); temp_memory.LazyAllocate(temp_storage_bytes); cub::DeviceRadixSort::SortPairs( temp_memory.d_temp_storage, temp_memory.temp_storage_bytes, position.current() + segment.begin, position.other() + segment.begin, ridx.current() + segment.begin, ridx.other() + segment.begin, segment.Size(), min_bits, max_bits); dh::safe_cuda(cudaMemcpy( position.current() + segment.begin, position.other() + segment.begin, segment.Size() * sizeof(int), cudaMemcpyDeviceToDevice)); dh::safe_cuda(cudaMemcpy( ridx.current() + segment.begin, ridx.other() + segment.begin, segment.Size() * sizeof(bst_uint), cudaMemcpyDeviceToDevice)); } }; class GPUHistMaker : public TreeUpdater { public: struct ExpandEntry; GPUHistMaker() : initialised(false) {} ~GPUHistMaker() {} void Init( const std::vector<std::pair<std::string, std::string>>& args) override { param.InitAllowUnknown(args); CHECK(param.n_gpus != 0) << "Must have at least one device"; n_devices = param.n_gpus; dh::check_compute_capability(); if (param.grow_policy == TrainParam::kLossGuide) { qexpand_.reset(new ExpandQueue(loss_guide)); } else { qexpand_.reset(new ExpandQueue(depth_wise)); } monitor.Init("updater_gpu_hist", param.debug_verbose); } void Update(const std::vector<bst_gpair>& gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) override { monitor.Start("Update"); GradStats::CheckInfo(dmat->info()); // rescale learning rate according to size of trees float lr = param.learning_rate; param.learning_rate = lr / trees.size(); ValueConstraint::Init(&param, dmat->info().num_col); // build tree try { for (size_t i = 0; i < trees.size(); ++i) { this->UpdateTree(gpair, dmat, trees[i]); } } catch (const std::exception& e) { LOG(FATAL) << "GPU plugin exception: " << e.what() << std::endl; } param.learning_rate = lr; monitor.Stop("Update"); } void InitDataOnce(DMatrix* dmat) { info = &dmat->info(); monitor.Start("Quantiles"); hmat_.Init(dmat, param.max_bin); gmat_.cut = &hmat_; gmat_.Init(dmat); monitor.Stop("Quantiles"); n_bins = hmat_.row_ptr.back(); int n_devices = dh::n_devices(param.n_gpus, info->num_row); bst_uint row_begin = 0; bst_uint shard_size = std::ceil(static_cast<double>(info->num_row) / n_devices); std::vector<int> dList(n_devices); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { int device_idx = (param.gpu_id + d_idx) % dh::n_visible_devices(); dList[d_idx] = device_idx; } reducer.Init(dList); // Partition input matrix into row segments std::vector<size_t> row_segments; shards.resize(n_devices); row_segments.push_back(0); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { bst_uint row_end = std::min(static_cast<size_t>(row_begin + 
shard_size), info->num_row); row_segments.push_back(row_end); row_begin = row_end; } // Create device shards omp_set_num_threads(shards.size()); #pragma omp parallel { auto cpu_thread_id = omp_get_thread_num(); shards[cpu_thread_id] = std::unique_ptr<DeviceShard>( new DeviceShard(dList[cpu_thread_id], cpu_thread_id, gmat_, row_segments[cpu_thread_id], row_segments[cpu_thread_id + 1], n_bins, param)); } initialised = true; } void InitData(const std::vector<bst_gpair>& gpair, DMatrix* dmat, const RegTree& tree) { monitor.Start("InitDataOnce"); if (!initialised) { this->InitDataOnce(dmat); } monitor.Stop("InitDataOnce"); column_sampler.Init(info->num_col, param); // Copy gpair & reset memory monitor.Start("InitDataReset"); omp_set_num_threads(shards.size()); #pragma omp parallel { auto cpu_thread_id = omp_get_thread_num(); shards[cpu_thread_id]->Reset(gpair); } monitor.Stop("InitDataReset"); } void AllReduceHist(int nidx) { for (auto& shard : shards) { auto d_node_hist = shard->hist.GetHistPtr(nidx); reducer.AllReduceSum( shard->normalised_device_idx, reinterpret_cast<gpair_sum_t::value_t*>(d_node_hist), reinterpret_cast<gpair_sum_t::value_t*>(d_node_hist), n_bins * (sizeof(gpair_sum_t) / sizeof(gpair_sum_t::value_t))); } reducer.Synchronize(); } void BuildHistLeftRight(int nidx_parent, int nidx_left, int nidx_right) { size_t left_node_max_elements = 0; size_t right_node_max_elements = 0; for (auto& shard : shards) { left_node_max_elements = (std::max)( left_node_max_elements, shard->ridx_segments[nidx_left].Size()); right_node_max_elements = (std::max)( right_node_max_elements, shard->ridx_segments[nidx_right].Size()); } auto build_hist_nidx = nidx_left; auto subtraction_trick_nidx = nidx_right; if (right_node_max_elements < left_node_max_elements) { build_hist_nidx = nidx_right; subtraction_trick_nidx = nidx_left; } for (auto& shard : shards) { shard->BuildHist(build_hist_nidx); } this->AllReduceHist(build_hist_nidx); for (auto& shard : shards) { shard->SubtractionTrick(nidx_parent, build_hist_nidx, subtraction_trick_nidx); } } // Returns best loss std::vector<DeviceSplitCandidate> EvaluateSplits( const std::vector<int>& nidx_set, RegTree* p_tree) { auto columns = info->num_col; std::vector<DeviceSplitCandidate> best_splits(nidx_set.size()); std::vector<DeviceSplitCandidate> candidate_splits(nidx_set.size() * columns); // Use first device auto& shard = shards.front(); dh::safe_cuda(cudaSetDevice(shard->device_idx)); shard->temp_memory.LazyAllocate(sizeof(DeviceSplitCandidate) * columns * nidx_set.size()); auto d_split = shard->temp_memory.Pointer<DeviceSplitCandidate>(); auto& streams = shard->GetStreams(static_cast<int>(nidx_set.size())); // Use streams to process nodes concurrently for (auto i = 0; i < nidx_set.size(); i++) { auto nidx = nidx_set[i]; DeviceNodeStats node(shard->node_sum_gradients[nidx], nidx, param); const int BLOCK_THREADS = 256; evaluate_split_kernel<BLOCK_THREADS> <<<uint32_t(columns), BLOCK_THREADS, 0, streams[i]>>>( shard->hist.GetHistPtr(nidx), nidx, info->num_col, node, shard->feature_segments.data(), shard->min_fvalue.data(), shard->gidx_fvalue_map.data(), GPUTrainingParam(param), d_split + i * columns, node_value_constraints_[nidx], shard->monotone_constraints.data()); } dh::safe_cuda( cudaMemcpy(candidate_splits.data(), shard->temp_memory.d_temp_storage, sizeof(DeviceSplitCandidate) * columns * nidx_set.size(), cudaMemcpyDeviceToHost)); for (auto i = 0; i < nidx_set.size(); i++) { auto nidx = nidx_set[i]; DeviceSplitCandidate nidx_best; for (auto fidx = 0; fidx 
< columns; fidx++) { auto& candidate = candidate_splits[i * columns + fidx]; if (column_sampler.ColumnUsed(candidate.findex, p_tree->GetDepth(nidx))) { nidx_best.Update(candidate_splits[i * columns + fidx], param); } } best_splits[i] = nidx_best; } return std::move(best_splits); } void InitRoot(const std::vector<bst_gpair>& gpair, RegTree* p_tree) { auto root_nidx = 0; // Sum gradients std::vector<bst_gpair> tmp_sums(shards.size()); omp_set_num_threads(shards.size()); #pragma omp parallel { auto cpu_thread_id = omp_get_thread_num(); dh::safe_cuda(cudaSetDevice(shards[cpu_thread_id]->device_idx)); tmp_sums[cpu_thread_id] = thrust::reduce(thrust::cuda::par(shards[cpu_thread_id]->temp_memory), shards[cpu_thread_id]->gpair.tbegin(), shards[cpu_thread_id]->gpair.tend()); } auto sum_gradient = std::accumulate(tmp_sums.begin(), tmp_sums.end(), bst_gpair()); // Generate root histogram for (auto& shard : shards) { shard->BuildHist(root_nidx); } this->AllReduceHist(root_nidx); // Remember root stats p_tree->stat(root_nidx).sum_hess = sum_gradient.GetHess(); p_tree->stat(root_nidx).base_weight = CalcWeight(param, sum_gradient); // Store sum gradients for (auto& shard : shards) { shard->node_sum_gradients[root_nidx] = sum_gradient; } // Initialise root constraint node_value_constraints_.resize(p_tree->GetNodes().size()); // Generate first split auto splits = this->EvaluateSplits({root_nidx}, p_tree); qexpand_->push( ExpandEntry(root_nidx, p_tree->GetDepth(root_nidx), splits.front(), 0)); } void UpdatePosition(const ExpandEntry& candidate, RegTree* p_tree) { auto nidx = candidate.nid; auto left_nidx = (*p_tree)[nidx].cleft(); auto right_nidx = (*p_tree)[nidx].cright(); // convert floating-point split_pt into corresponding bin_id // split_cond = -1 indicates that split_pt is less than all known cut points auto split_gidx = -1; auto fidx = candidate.split.findex; auto default_dir_left = candidate.split.dir == LeftDir; auto fidx_begin = hmat_.row_ptr[fidx]; auto fidx_end = hmat_.row_ptr[fidx + 1]; for (auto i = fidx_begin; i < fidx_end; ++i) { if (candidate.split.fvalue == hmat_.cut[i]) { split_gidx = static_cast<int32_t>(i); } } auto is_dense = info->num_nonzero == info->num_row * info->num_col; omp_set_num_threads(shards.size()); #pragma omp parallel { auto cpu_thread_id = omp_get_thread_num(); shards[cpu_thread_id]->UpdatePosition(nidx, left_nidx, right_nidx, fidx, split_gidx, default_dir_left, is_dense, fidx_begin, fidx_end); } } void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) { // Add new leaves RegTree& tree = *p_tree; tree.AddChilds(candidate.nid); auto& parent = tree[candidate.nid]; parent.set_split(candidate.split.findex, candidate.split.fvalue, candidate.split.dir == LeftDir); tree.stat(candidate.nid).loss_chg = candidate.split.loss_chg; // Set up child constraints node_value_constraints_.resize(tree.GetNodes().size()); GradStats left_stats(param); left_stats.Add(candidate.split.left_sum); GradStats right_stats(param); right_stats.Add(candidate.split.right_sum); node_value_constraints_[candidate.nid].SetChild( param, parent.split_index(), left_stats, right_stats, &node_value_constraints_[parent.cleft()], &node_value_constraints_[parent.cright()]); // Configure left child auto left_weight = node_value_constraints_[parent.cleft()].CalcWeight(param, left_stats); tree[parent.cleft()].set_leaf(left_weight * param.learning_rate, 0); tree.stat(parent.cleft()).base_weight = left_weight; tree.stat(parent.cleft()).sum_hess = candidate.split.left_sum.GetHess(); // Configure right child auto 
right_weight = node_value_constraints_[parent.cright()].CalcWeight(param, right_stats); tree[parent.cright()].set_leaf(right_weight * param.learning_rate, 0); tree.stat(parent.cright()).base_weight = right_weight; tree.stat(parent.cright()).sum_hess = candidate.split.right_sum.GetHess(); // Store sum gradients for (auto& shard : shards) { shard->node_sum_gradients[parent.cleft()] = candidate.split.left_sum; shard->node_sum_gradients[parent.cright()] = candidate.split.right_sum; } this->UpdatePosition(candidate, p_tree); } void UpdateTree(const std::vector<bst_gpair>& gpair, DMatrix* p_fmat, RegTree* p_tree) { // Temporarily store number of threads so we can change it back later int nthread = omp_get_max_threads(); auto& tree = *p_tree; monitor.Start("InitData"); this->InitData(gpair, p_fmat, *p_tree); monitor.Stop("InitData"); monitor.Start("InitRoot"); this->InitRoot(gpair, p_tree); monitor.Stop("InitRoot"); auto timestamp = qexpand_->size(); auto num_leaves = 1; while (!qexpand_->empty()) { auto candidate = qexpand_->top(); qexpand_->pop(); if (!candidate.IsValid(param, num_leaves)) continue; // std::cout << candidate; monitor.Start("ApplySplit"); this->ApplySplit(candidate, p_tree); monitor.Stop("ApplySplit"); num_leaves++; auto left_child_nidx = tree[candidate.nid].cleft(); auto right_child_nidx = tree[candidate.nid].cright(); // Only create child entries if needed if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx), num_leaves)) { monitor.Start("BuildHist"); this->BuildHistLeftRight(candidate.nid, left_child_nidx, right_child_nidx); monitor.Stop("BuildHist"); monitor.Start("EvaluateSplits"); auto splits = this->EvaluateSplits({left_child_nidx, right_child_nidx}, p_tree); qexpand_->push(ExpandEntry(left_child_nidx, tree.GetDepth(left_child_nidx), splits[0], timestamp++)); qexpand_->push(ExpandEntry(right_child_nidx, tree.GetDepth(right_child_nidx), splits[1], timestamp++)); monitor.Stop("EvaluateSplits"); } } // Reset omp num threads omp_set_num_threads(nthread); } struct ExpandEntry { int nid; int depth; DeviceSplitCandidate split; uint64_t timestamp; ExpandEntry(int nid, int depth, const DeviceSplitCandidate& split, uint64_t timestamp) : nid(nid), depth(depth), split(split), timestamp(timestamp) {} bool IsValid(const TrainParam& param, int num_leaves) const { if (split.loss_chg <= rt_eps) return false; if (param.max_depth > 0 && depth == param.max_depth) return false; if (param.max_leaves > 0 && num_leaves == param.max_leaves) return false; return true; } static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) { if (param.max_depth > 0 && depth == param.max_depth) return false; if (param.max_leaves > 0 && num_leaves == param.max_leaves) return false; return true; } friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) { os << "ExpandEntry: \n"; os << "nidx: " << e.nid << "\n"; os << "depth: " << e.depth << "\n"; os << "loss: " << e.split.loss_chg << "\n"; os << "left_sum: " << e.split.left_sum << "\n"; os << "right_sum: " << e.split.right_sum << "\n"; return os; } }; inline static bool depth_wise(ExpandEntry lhs, ExpandEntry rhs) { if (lhs.depth == rhs.depth) { return lhs.timestamp > rhs.timestamp; // favor small timestamp } else { return lhs.depth > rhs.depth; // favor small depth } } inline static bool loss_guide(ExpandEntry lhs, ExpandEntry rhs) { if (lhs.split.loss_chg == rhs.split.loss_chg) { return lhs.timestamp > rhs.timestamp; // favor small timestamp } else { return lhs.split.loss_chg < rhs.split.loss_chg; // favor large 
loss_chg } } TrainParam param; common::HistCutMatrix hmat_; common::GHistIndexMatrix gmat_; MetaInfo* info; bool initialised; int n_devices; int n_bins; std::vector<std::unique_ptr<DeviceShard>> shards; ColumnSampler column_sampler; typedef std::priority_queue<ExpandEntry, std::vector<ExpandEntry>, std::function<bool(ExpandEntry, ExpandEntry)>> ExpandQueue; std::unique_ptr<ExpandQueue> qexpand_; common::Monitor monitor; dh::AllReducer reducer; std::vector<ValueConstraint> node_value_constraints_; }; XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist") .describe("Grow tree with GPU.") .set_body([]() { return new GPUHistMaker(); }); } // namespace tree } // namespace xgboost
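The CountLeft helper in the shard code above issues a single atomicAdd per warp by first voting with a warp ballot and then adding the popcount from lane 0. A minimal standalone sketch of that pattern, using the newer __ballot_sync intrinsic and hypothetical names (count_equal, d_vals are mine, not from the file):

#include <cstdio>
#include <cuda_runtime.h>

// Count how many elements of d_vals equal `target`, issuing only one
// atomicAdd per warp: every lane votes with __ballot_sync, then lane 0
// adds the popcount of the resulting mask.
__global__ void count_equal(const int* d_vals, int n, int target,
                            unsigned long long* d_count) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  bool pred = (idx < n) && (d_vals[idx] == target);
  unsigned ballot = __ballot_sync(0xffffffffu, pred);
  if ((threadIdx.x & 31) == 0) {
    atomicAdd(d_count, static_cast<unsigned long long>(__popc(ballot)));
  }
}

int main() {
  const int n = 1 << 20;
  int* d_vals;
  unsigned long long* d_count;
  cudaMalloc(&d_vals, n * sizeof(int));
  cudaMalloc(&d_count, sizeof(unsigned long long));
  cudaMemset(d_vals, 0, n * sizeof(int));           // all zeros -> every element matches target 0
  cudaMemset(d_count, 0, sizeof(unsigned long long));
  count_equal<<<(n + 255) / 256, 256>>>(d_vals, n, 0, d_count);
  unsigned long long h_count = 0;
  cudaMemcpy(&h_count, d_count, sizeof(h_count), cudaMemcpyDeviceToHost);
  printf("matches: %llu (expected %d)\n", h_count, n);
  cudaFree(d_vals);
  cudaFree(d_count);
  return 0;
}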
4f3d6a9bb6c988606cfdc0ee5e1a4f13358823ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "pointwise_scores.cuh" #include "score_calcers.cuh" #include "split_properties_helpers.cuh" #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/random_gen.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <cmath> #include <exception> #include <cfloat> namespace NKernel { template <int BLOCK_SIZE> __global__ void FindOptimalSplitSolarImpl(const TCBinFeature* bf, int binFeatureCount, const float* binSums, const TPartitionStatistics* parts, int pCount, int foldCount, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = 0; int tid = threadIdx.x; result += blockIdx.x; TPointwisePartOffsetsHelper helper(foldCount); for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) { if (i + tid >= binFeatureCount) { break; } const float* current = binSums + 2 * (i + tid); float score = 0; for (int leaf = 0; leaf < pCount; leaf++) { float leftTotalWeight = 0; float rightTotalWeight = 0; float leftScore = 0; float rightScore = 0; #pragma unroll 4 for (int fold = 0; fold < foldCount; fold += 2) { TPartitionStatistics partLearn = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold)); TPartitionStatistics partTest = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold + 1)); float weightEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2]; float weightEstimateRight = partLearn.Weight - weightEstimateLeft; float sumEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2 + 1]; float sumEstimateRight = partLearn.Sum - sumEstimateLeft; float weightTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2]; float weightTestRight = partTest.Weight - weightTestLeft; float sumTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2 + 1]; float sumTestRight = partTest.Sum - sumTestLeft; { const float mu = weightEstimateLeft > 0.0f ? (sumEstimateLeft / (weightEstimateLeft + 1e-15f)) : 0; leftScore += -2 * mu * sumTestLeft + weightTestLeft * mu * mu; leftTotalWeight += weightTestLeft; } { const float mu = weightEstimateRight > 0.0f ? (sumEstimateRight / (weightEstimateRight + 1e-15f)) : 0; rightTotalWeight += weightTestRight; rightScore += -2 * mu * sumTestRight + weightTestRight * mu * mu; } } score += leftTotalWeight > 2 ? leftScore * (1 + 2 * log(leftTotalWeight + 1)) : 0; score += rightTotalWeight > 2 ? rightScore * (1 + 2 * log(rightTotalWeight + 1)) : 0; } if (score < bestScore) { bestScore = score; bestIndex = i + tid; } } __shared__ float scores[BLOCK_SIZE]; scores[tid] = bestScore; __shared__ int indices[BLOCK_SIZE]; indices[tid] = bestIndex; __syncthreads(); for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) { if (tid < s) { if ( scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { scores[tid] = scores[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } if (!tid) { const int index = indices[0]; result->FeatureId = index < binFeatureCount ? bf[index].FeatureId : 0; result->BinId = index < binFeatureCount ? 
bf[index].BinId : 0; result->Score = scores[0]; } } class TDirectHistLoader { public: __forceinline__ __device__ TDirectHistLoader(const float* binSums, TPointwisePartOffsetsHelper& helper, int binFeatureId, int /* leaf count*/, int binFeatureCount) : BinSums(binSums + 2 * binFeatureId) , Helper(helper) , BinFeatureCount(binFeatureCount) { } __forceinline__ __device__ float LoadWeight(int leaf) { return BinSums[(size_t)BinFeatureCount * Helper.GetHistogramOffset(leaf, 0) * 2]; } __forceinline__ __device__ float LoadSum(int leaf) { return BinSums[(size_t)BinFeatureCount * Helper.GetHistogramOffset(leaf, 0) * 2 + 1]; } private: const float* BinSums; TPointwisePartOffsetsHelper& Helper; int BinFeatureCount; }; class TGatheredByLeavesHistLoader { public: __forceinline__ __device__ TGatheredByLeavesHistLoader(const float* binSums, TPointwisePartOffsetsHelper&, int binFeatureId, int leafCount, int /*binFeatureCount*/) : BinSums(binSums) , LeafCount(leafCount) , FeatureId(binFeatureId) { } __forceinline__ __device__ int GetOffset(int leaf) { return 2 * (FeatureId * LeafCount + leaf); } __forceinline__ __device__ float LoadWeight(int leaf) { return BinSums[GetOffset(leaf)]; } __forceinline__ __device__ float LoadSum(int leaf) { return BinSums[GetOffset(leaf) + 1]; } private: const float* BinSums; int LeafCount; int FeatureId; }; template <int BLOCK_SIZE, class THistLoader, class TScoreCalcer> __global__ void FindOptimalSplitSingleFoldImpl(const TCBinFeature* bf, int binFeatureCount, const float* binSums, const TPartitionStatistics* parts, int pCount, TScoreCalcer calcer, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = 0; int tid = threadIdx.x; result += blockIdx.x; TPointwisePartOffsetsHelper helper(1); for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) { if (i + tid >= binFeatureCount) { break; } calcer.NextFeature(bf[i + tid]); THistLoader histLoader(binSums, helper, i + tid, pCount, binFeatureCount); for (int leaf = 0; leaf < pCount; leaf++) { TPartitionStatistics part = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, 0)); float weightLeft = histLoader.LoadWeight(leaf); float weightRight = max(part.Weight - weightLeft, 0.0f); float sumLeft = histLoader.LoadSum(leaf); float sumRight = static_cast<float>(part.Sum - sumLeft); calcer.AddLeaf(sumLeft, weightLeft); calcer.AddLeaf(sumRight, weightRight); } const float score = calcer.GetScore(); if (score < bestScore) { bestScore = score; bestIndex = i + tid; } } __shared__ float scores[BLOCK_SIZE]; scores[tid] = bestScore; __shared__ int indices[BLOCK_SIZE]; indices[tid] = bestIndex; __syncthreads(); for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) { if (tid < s) { if ( scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { scores[tid] = scores[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } if (!tid) { const int index = indices[0]; result->FeatureId = index < binFeatureCount ? bf[index].FeatureId : 0; result->BinId = index < binFeatureCount ? 
bf[index].BinId : 0; result->Score = scores[0]; } } template <int BLOCK_SIZE> __global__ void FindOptimalSplitCorrelationImpl(const TCBinFeature* bf, int binFeatureCount, const float* binSums, const TPartitionStatistics* parts, int pCount, int foldCount, double l2, bool normalize, double scoreStdDev, ui64 globalSeed, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = 0; int tid = threadIdx.x; result += blockIdx.x; TPointwisePartOffsetsHelper helper(foldCount); for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) { if (i + tid >= binFeatureCount) { break; } float score = 0; float denumSqr = 1e-20f; const float* current = binSums + 2 * (i + tid); for (int leaf = 0; leaf < pCount; leaf++) { #pragma unroll 4 for (int fold = 0; fold < foldCount; fold += 2) { TPartitionStatistics partLearn = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold)); TPartitionStatistics partTest = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold + 1)); float weightEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2]; float weightEstimateRight = max(partLearn.Weight - weightEstimateLeft, 0.0f); float sumEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2 + 1]; float sumEstimateRight = partLearn.Sum - sumEstimateLeft; float weightTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2]; float weightTestRight = max(partTest.Weight - weightTestLeft, 0.0f); float sumTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2 + 1]; float sumTestRight = partTest.Sum - sumTestLeft; { double lambda = normalize ? l2 * weightEstimateLeft : l2; const float mu = weightEstimateLeft > 0 ? (sumEstimateLeft / (weightEstimateLeft + lambda)) : 0; score += sumTestLeft * mu; denumSqr += weightTestLeft * mu * mu; } { double lambda = normalize ? l2 * weightEstimateRight : l2; const float mu = weightEstimateRight > 0 ? (sumEstimateRight / (weightEstimateRight + lambda)) : 0; score += sumTestRight * mu; denumSqr += weightTestRight * mu * mu; } } } score = denumSqr > 1e-15f ? -score / sqrt(denumSqr) : FLT_MAX; float tmp = score; if (scoreStdDev) { ui64 seed = globalSeed + bf[i + tid].FeatureId; AdvanceSeed(&seed, 4); tmp += NextNormal(&seed) * scoreStdDev; } if (tmp < bestScore) { bestScore = tmp; bestIndex = i + tid; } } __shared__ float scores[BLOCK_SIZE]; scores[tid] = bestScore; __shared__ int indices[BLOCK_SIZE]; indices[tid] = bestIndex; __syncthreads(); for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) { if (tid < s) { if (scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { scores[tid] = scores[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } if (!tid) { const int index = indices[0]; result->FeatureId = index < binFeatureCount ? bf[index].FeatureId : 0; result->BinId = index < binFeatureCount ? 
bf[index].BinId : 0; result->Score = scores[0]; } } void FindOptimalSplitDynamic(const TCBinFeature* binaryFeatures,ui32 binaryFeatureCount, const float* splits, const TPartitionStatistics* parts, ui32 pCount, ui32 foldCount, TBestSplitProperties* result, ui32 resultSize, EScoreFunction scoreFunction, double l2, bool normalize, double scoreStdDev, ui64 seed, TCudaStream stream) { const int blockSize = 128; switch (scoreFunction) { case EScoreFunction::SolarL2: { FindOptimalSplitSolarImpl<blockSize> << < resultSize, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount, result); break; } case EScoreFunction::Correlation: case EScoreFunction::NewtonCorrelation: { FindOptimalSplitCorrelationImpl<blockSize> << < resultSize, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount, l2, normalize, scoreStdDev, seed, result); break; } default: { throw std::exception(); } } } template <class TLoader> void FindOptimalSplitPlain(const TCBinFeature* binaryFeatures,ui32 binaryFeatureCount, const float* splits, const TPartitionStatistics* parts, ui32 pCount, TBestSplitProperties* result, ui32 resultSize, EScoreFunction scoreFunction, double l2, bool normalize, double scoreStdDev, ui64 seed, TCudaStream stream) { const int blockSize = 128; #define RUN() \ FindOptimalSplitSingleFoldImpl<blockSize, TLoader, TScoreCalcer> << < resultSize, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, splits, parts, pCount, scoreCalcer, result); switch (scoreFunction) { case EScoreFunction::SolarL2: { using TScoreCalcer = TSolarScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::SatL2: { using TScoreCalcer = TSatL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::LOOL2: { using TScoreCalcer = TLOOL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::L2: case EScoreFunction::NewtonL2: { using TScoreCalcer = TL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::Correlation: case EScoreFunction::NewtonCorrelation: { using TScoreCalcer = TCorrelationScoreCalcer; TCorrelationScoreCalcer scoreCalcer(static_cast<float>(l2), normalize, static_cast<float>(scoreStdDev), seed); RUN() break; } default: { throw std::exception(); } } #undef RUN } void FindOptimalSplit(const TCBinFeature* binaryFeatures,ui32 binaryFeatureCount, const float* splits, const TPartitionStatistics* parts, ui32 pCount, ui32 foldCount, TBestSplitProperties* result, ui32 resultSize, EScoreFunction scoreFunction, double l2, bool normalize, double scoreStdDev, ui64 seed, bool gatheredByLeaves, TCudaStream stream) { if (foldCount == 1) { if (gatheredByLeaves) { using THistLoader = TGatheredByLeavesHistLoader; FindOptimalSplitPlain<THistLoader>(binaryFeatures, binaryFeatureCount, splits, parts, pCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream); } else { using THistLoader = TDirectHistLoader; FindOptimalSplitPlain<THistLoader>(binaryFeatures, binaryFeatureCount, splits, parts, pCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream); } } else { FindOptimalSplitDynamic(binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream); } } template <int BLOCK_SIZE, int HIST_COUNT> __global__ void GatherHistogramsByLeavesImpl(const int 
binFeatureCount, const float* histogram, const int histCount, const int leafCount, const int foldCount, float* result) { const int featuresPerBlock = BLOCK_SIZE / leafCount; const int featureId = blockIdx.x * featuresPerBlock + threadIdx.x / leafCount; const int leafId = threadIdx.x & (leafCount - 1); const int foldId = blockIdx.y; TPointwisePartOffsetsHelper helper(gridDim.y); if (featureId < binFeatureCount) { float leafVals[HIST_COUNT]; #pragma unroll for (int histId = 0; histId < HIST_COUNT; ++histId) { leafVals[histId] = LdgWithFallback(histogram, (featureId + (size_t)binFeatureCount * helper.GetHistogramOffset(leafId, foldId)) * HIST_COUNT + histId); } #pragma unroll for (int histId = 0; histId < HIST_COUNT; ++histId) { const ui64 idx = ((size_t)featureId * leafCount * foldCount + leafId * foldCount + foldId) * HIST_COUNT + histId; result[idx] = leafVals[histId]; } } } bool GatherHistogramByLeaves(const float* histogram, const ui32 binFeatureCount, const ui32 histCount, const ui32 leafCount, const ui32 foldCount, float* result, TCudaStream stream ) { const int blockSize = 1024; dim3 numBlocks; numBlocks.x = (binFeatureCount + (blockSize / leafCount) - 1) / (blockSize / leafCount); numBlocks.y = foldCount; numBlocks.z = 1; switch (histCount) { case 1: { hipLaunchKernelGGL(( GatherHistogramsByLeavesImpl<blockSize, 1>) , dim3(numBlocks), dim3(blockSize), 0, stream, binFeatureCount, histogram, histCount, leafCount, foldCount, result); return true; } case 2: { hipLaunchKernelGGL(( GatherHistogramsByLeavesImpl<blockSize, 2>) , dim3(numBlocks), dim3(blockSize), 0, stream, binFeatureCount, histogram, histCount, leafCount, foldCount, result); return true; } case 4: { hipLaunchKernelGGL(( GatherHistogramsByLeavesImpl<blockSize, 4>) , dim3(numBlocks), dim3(blockSize), 0, stream, binFeatureCount, histogram, histCount, leafCount, foldCount, result); return true; } default: { return false; } } } template <int BLOCK_SIZE> __global__ void PartitionUpdateImpl(const float* target, const float* weights, const float* counts, const struct TDataPartition* parts, struct TPartitionStatistics* partStats) { const int tid = threadIdx.x; parts += blockIdx.x; partStats += blockIdx.x; const int size = parts->Size; __shared__ volatile double localBuffer[BLOCK_SIZE]; double tmp = 0; if (weights != 0) { localBuffer[tid] = ComputeSum<BLOCK_SIZE>(weights + parts->Offset, size); __syncthreads(); tmp = Reduce<double, BLOCK_SIZE>(localBuffer); } if (tid == 0) { partStats->Weight = tmp; } tmp = 0; __syncthreads(); if (target != 0) { localBuffer[tid] = ComputeSum<BLOCK_SIZE>(target + parts->Offset, size); __syncthreads(); tmp = Reduce<double, BLOCK_SIZE>(localBuffer); } if (tid == 0) { partStats->Sum = tmp; } tmp = 0; __syncthreads(); if (counts != 0) { localBuffer[tid] = ComputeSum<BLOCK_SIZE>(counts + parts->Offset, size); __syncthreads(); tmp = Reduce<double, BLOCK_SIZE>(localBuffer); } else { tmp = size; } if (tid == 0) { partStats->Count = tmp; } } void UpdatePartitionProps(const float* target, const float* weights, const float* counts, const struct TDataPartition* parts, struct TPartitionStatistics* partStats, int partsCount, TCudaStream stream ) { const int blockSize = 1024; if (partsCount) { PartitionUpdateImpl<blockSize> << < partsCount, blockSize, 0, stream >> > (target, weights, counts, parts, partStats); } } }
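The three split-search kernels above all finish with the same block-wide argmin over shared (score, index) arrays, breaking ties toward the smaller index. A self-contained sketch of just that reduction, assuming a fixed block size; block_argmin and d_scores are illustrative names:

#include <cstdio>
#include <cfloat>
#include <cuda_runtime.h>

constexpr int BLOCK_SIZE = 256;

// Each thread proposes one (score, index) candidate; the block keeps the
// smallest score, breaking ties in favour of the smaller index, like the
// shared-memory reduction at the end of the split-search kernels above.
__global__ void block_argmin(const float* d_scores, int n, int* d_best) {
  __shared__ float scores[BLOCK_SIZE];
  __shared__ int indices[BLOCK_SIZE];
  int tid = threadIdx.x;

  scores[tid]  = (tid < n) ? d_scores[tid] : FLT_MAX;
  indices[tid] = tid;
  __syncthreads();

  for (int s = BLOCK_SIZE >> 1; s > 0; s >>= 1) {
    if (tid < s) {
      if (scores[tid] > scores[tid + s] ||
          (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s])) {
        scores[tid]  = scores[tid + s];
        indices[tid] = indices[tid + s];
      }
    }
    __syncthreads();
  }
  if (tid == 0) *d_best = indices[0];
}

int main() {
  float h_scores[8] = {5.f, 3.f, 9.f, 3.f, 7.f, 4.f, 8.f, 6.f};
  float* d_scores; int* d_best;
  cudaMalloc(&d_scores, sizeof(h_scores));
  cudaMalloc(&d_best, sizeof(int));
  cudaMemcpy(d_scores, h_scores, sizeof(h_scores), cudaMemcpyHostToDevice);
  block_argmin<<<1, BLOCK_SIZE>>>(d_scores, 8, d_best);
  int h_best = -1;
  cudaMemcpy(&h_best, d_best, sizeof(int), cudaMemcpyDeviceToHost);
  printf("best index: %d (expected 1)\n", h_best);  // 3.f first appears at index 1
  cudaFree(d_scores); cudaFree(d_best);
  return 0;
}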
4f3d6a9bb6c988606cfdc0ee5e1a4f13358823ac.cu
#include "pointwise_scores.cuh" #include "score_calcers.cuh" #include "split_properties_helpers.cuh" #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/random_gen.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <cmath> #include <exception> #include <cfloat> namespace NKernel { template <int BLOCK_SIZE> __global__ void FindOptimalSplitSolarImpl(const TCBinFeature* bf, int binFeatureCount, const float* binSums, const TPartitionStatistics* parts, int pCount, int foldCount, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = 0; int tid = threadIdx.x; result += blockIdx.x; TPointwisePartOffsetsHelper helper(foldCount); for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) { if (i + tid >= binFeatureCount) { break; } const float* current = binSums + 2 * (i + tid); float score = 0; for (int leaf = 0; leaf < pCount; leaf++) { float leftTotalWeight = 0; float rightTotalWeight = 0; float leftScore = 0; float rightScore = 0; #pragma unroll 4 for (int fold = 0; fold < foldCount; fold += 2) { TPartitionStatistics partLearn = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold)); TPartitionStatistics partTest = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold + 1)); float weightEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2]; float weightEstimateRight = partLearn.Weight - weightEstimateLeft; float sumEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2 + 1]; float sumEstimateRight = partLearn.Sum - sumEstimateLeft; float weightTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2]; float weightTestRight = partTest.Weight - weightTestLeft; float sumTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2 + 1]; float sumTestRight = partTest.Sum - sumTestLeft; { const float mu = weightEstimateLeft > 0.0f ? (sumEstimateLeft / (weightEstimateLeft + 1e-15f)) : 0; leftScore += -2 * mu * sumTestLeft + weightTestLeft * mu * mu; leftTotalWeight += weightTestLeft; } { const float mu = weightEstimateRight > 0.0f ? (sumEstimateRight / (weightEstimateRight + 1e-15f)) : 0; rightTotalWeight += weightTestRight; rightScore += -2 * mu * sumTestRight + weightTestRight * mu * mu; } } score += leftTotalWeight > 2 ? leftScore * (1 + 2 * log(leftTotalWeight + 1)) : 0; score += rightTotalWeight > 2 ? rightScore * (1 + 2 * log(rightTotalWeight + 1)) : 0; } if (score < bestScore) { bestScore = score; bestIndex = i + tid; } } __shared__ float scores[BLOCK_SIZE]; scores[tid] = bestScore; __shared__ int indices[BLOCK_SIZE]; indices[tid] = bestIndex; __syncthreads(); for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) { if (tid < s) { if ( scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { scores[tid] = scores[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } if (!tid) { const int index = indices[0]; result->FeatureId = index < binFeatureCount ? bf[index].FeatureId : 0; result->BinId = index < binFeatureCount ? 
bf[index].BinId : 0; result->Score = scores[0]; } } class TDirectHistLoader { public: __forceinline__ __device__ TDirectHistLoader(const float* binSums, TPointwisePartOffsetsHelper& helper, int binFeatureId, int /* leaf count*/, int binFeatureCount) : BinSums(binSums + 2 * binFeatureId) , Helper(helper) , BinFeatureCount(binFeatureCount) { } __forceinline__ __device__ float LoadWeight(int leaf) { return BinSums[(size_t)BinFeatureCount * Helper.GetHistogramOffset(leaf, 0) * 2]; } __forceinline__ __device__ float LoadSum(int leaf) { return BinSums[(size_t)BinFeatureCount * Helper.GetHistogramOffset(leaf, 0) * 2 + 1]; } private: const float* BinSums; TPointwisePartOffsetsHelper& Helper; int BinFeatureCount; }; class TGatheredByLeavesHistLoader { public: __forceinline__ __device__ TGatheredByLeavesHistLoader(const float* binSums, TPointwisePartOffsetsHelper&, int binFeatureId, int leafCount, int /*binFeatureCount*/) : BinSums(binSums) , LeafCount(leafCount) , FeatureId(binFeatureId) { } __forceinline__ __device__ int GetOffset(int leaf) { return 2 * (FeatureId * LeafCount + leaf); } __forceinline__ __device__ float LoadWeight(int leaf) { return BinSums[GetOffset(leaf)]; } __forceinline__ __device__ float LoadSum(int leaf) { return BinSums[GetOffset(leaf) + 1]; } private: const float* BinSums; int LeafCount; int FeatureId; }; template <int BLOCK_SIZE, class THistLoader, class TScoreCalcer> __global__ void FindOptimalSplitSingleFoldImpl(const TCBinFeature* bf, int binFeatureCount, const float* binSums, const TPartitionStatistics* parts, int pCount, TScoreCalcer calcer, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = 0; int tid = threadIdx.x; result += blockIdx.x; TPointwisePartOffsetsHelper helper(1); for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) { if (i + tid >= binFeatureCount) { break; } calcer.NextFeature(bf[i + tid]); THistLoader histLoader(binSums, helper, i + tid, pCount, binFeatureCount); for (int leaf = 0; leaf < pCount; leaf++) { TPartitionStatistics part = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, 0)); float weightLeft = histLoader.LoadWeight(leaf); float weightRight = max(part.Weight - weightLeft, 0.0f); float sumLeft = histLoader.LoadSum(leaf); float sumRight = static_cast<float>(part.Sum - sumLeft); calcer.AddLeaf(sumLeft, weightLeft); calcer.AddLeaf(sumRight, weightRight); } const float score = calcer.GetScore(); if (score < bestScore) { bestScore = score; bestIndex = i + tid; } } __shared__ float scores[BLOCK_SIZE]; scores[tid] = bestScore; __shared__ int indices[BLOCK_SIZE]; indices[tid] = bestIndex; __syncthreads(); for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) { if (tid < s) { if ( scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { scores[tid] = scores[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } if (!tid) { const int index = indices[0]; result->FeatureId = index < binFeatureCount ? bf[index].FeatureId : 0; result->BinId = index < binFeatureCount ? 
bf[index].BinId : 0; result->Score = scores[0]; } } template <int BLOCK_SIZE> __global__ void FindOptimalSplitCorrelationImpl(const TCBinFeature* bf, int binFeatureCount, const float* binSums, const TPartitionStatistics* parts, int pCount, int foldCount, double l2, bool normalize, double scoreStdDev, ui64 globalSeed, TBestSplitProperties* result) { float bestScore = FLT_MAX; int bestIndex = 0; int tid = threadIdx.x; result += blockIdx.x; TPointwisePartOffsetsHelper helper(foldCount); for (int i = blockIdx.x * BLOCK_SIZE; i < binFeatureCount; i += BLOCK_SIZE * gridDim.x) { if (i + tid >= binFeatureCount) { break; } float score = 0; float denumSqr = 1e-20f; const float* current = binSums + 2 * (i + tid); for (int leaf = 0; leaf < pCount; leaf++) { #pragma unroll 4 for (int fold = 0; fold < foldCount; fold += 2) { TPartitionStatistics partLearn = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold)); TPartitionStatistics partTest = LdgWithFallback(parts, helper.GetDataPartitionOffset(leaf, fold + 1)); float weightEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2]; float weightEstimateRight = max(partLearn.Weight - weightEstimateLeft, 0.0f); float sumEstimateLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold) * 2 + 1]; float sumEstimateRight = partLearn.Sum - sumEstimateLeft; float weightTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2]; float weightTestRight = max(partTest.Weight - weightTestLeft, 0.0f); float sumTestLeft = current[(size_t)binFeatureCount * helper.GetHistogramOffset(leaf, fold + 1) * 2 + 1]; float sumTestRight = partTest.Sum - sumTestLeft; { double lambda = normalize ? l2 * weightEstimateLeft : l2; const float mu = weightEstimateLeft > 0 ? (sumEstimateLeft / (weightEstimateLeft + lambda)) : 0; score += sumTestLeft * mu; denumSqr += weightTestLeft * mu * mu; } { double lambda = normalize ? l2 * weightEstimateRight : l2; const float mu = weightEstimateRight > 0 ? (sumEstimateRight / (weightEstimateRight + lambda)) : 0; score += sumTestRight * mu; denumSqr += weightTestRight * mu * mu; } } } score = denumSqr > 1e-15f ? -score / sqrt(denumSqr) : FLT_MAX; float tmp = score; if (scoreStdDev) { ui64 seed = globalSeed + bf[i + tid].FeatureId; AdvanceSeed(&seed, 4); tmp += NextNormal(&seed) * scoreStdDev; } if (tmp < bestScore) { bestScore = tmp; bestIndex = i + tid; } } __shared__ float scores[BLOCK_SIZE]; scores[tid] = bestScore; __shared__ int indices[BLOCK_SIZE]; indices[tid] = bestIndex; __syncthreads(); for (ui32 s = BLOCK_SIZE >> 1; s > 0; s >>= 1) { if (tid < s) { if (scores[tid] > scores[tid + s] || (scores[tid] == scores[tid + s] && indices[tid] > indices[tid + s]) ) { scores[tid] = scores[tid + s]; indices[tid] = indices[tid + s]; } } __syncthreads(); } if (!tid) { const int index = indices[0]; result->FeatureId = index < binFeatureCount ? bf[index].FeatureId : 0; result->BinId = index < binFeatureCount ? 
bf[index].BinId : 0; result->Score = scores[0]; } } void FindOptimalSplitDynamic(const TCBinFeature* binaryFeatures,ui32 binaryFeatureCount, const float* splits, const TPartitionStatistics* parts, ui32 pCount, ui32 foldCount, TBestSplitProperties* result, ui32 resultSize, EScoreFunction scoreFunction, double l2, bool normalize, double scoreStdDev, ui64 seed, TCudaStream stream) { const int blockSize = 128; switch (scoreFunction) { case EScoreFunction::SolarL2: { FindOptimalSplitSolarImpl<blockSize> << < resultSize, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount, result); break; } case EScoreFunction::Correlation: case EScoreFunction::NewtonCorrelation: { FindOptimalSplitCorrelationImpl<blockSize> << < resultSize, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount, l2, normalize, scoreStdDev, seed, result); break; } default: { throw std::exception(); } } } template <class TLoader> void FindOptimalSplitPlain(const TCBinFeature* binaryFeatures,ui32 binaryFeatureCount, const float* splits, const TPartitionStatistics* parts, ui32 pCount, TBestSplitProperties* result, ui32 resultSize, EScoreFunction scoreFunction, double l2, bool normalize, double scoreStdDev, ui64 seed, TCudaStream stream) { const int blockSize = 128; #define RUN() \ FindOptimalSplitSingleFoldImpl<blockSize, TLoader, TScoreCalcer> << < resultSize, blockSize, 0, stream >> > (binaryFeatures, binaryFeatureCount, splits, parts, pCount, scoreCalcer, result); switch (scoreFunction) { case EScoreFunction::SolarL2: { using TScoreCalcer = TSolarScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::SatL2: { using TScoreCalcer = TSatL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::LOOL2: { using TScoreCalcer = TLOOL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::L2: case EScoreFunction::NewtonL2: { using TScoreCalcer = TL2ScoreCalcer; TScoreCalcer scoreCalcer(static_cast<float>(l2)); RUN() break; } case EScoreFunction::Correlation: case EScoreFunction::NewtonCorrelation: { using TScoreCalcer = TCorrelationScoreCalcer; TCorrelationScoreCalcer scoreCalcer(static_cast<float>(l2), normalize, static_cast<float>(scoreStdDev), seed); RUN() break; } default: { throw std::exception(); } } #undef RUN } void FindOptimalSplit(const TCBinFeature* binaryFeatures,ui32 binaryFeatureCount, const float* splits, const TPartitionStatistics* parts, ui32 pCount, ui32 foldCount, TBestSplitProperties* result, ui32 resultSize, EScoreFunction scoreFunction, double l2, bool normalize, double scoreStdDev, ui64 seed, bool gatheredByLeaves, TCudaStream stream) { if (foldCount == 1) { if (gatheredByLeaves) { using THistLoader = TGatheredByLeavesHistLoader; FindOptimalSplitPlain<THistLoader>(binaryFeatures, binaryFeatureCount, splits, parts, pCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream); } else { using THistLoader = TDirectHistLoader; FindOptimalSplitPlain<THistLoader>(binaryFeatures, binaryFeatureCount, splits, parts, pCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream); } } else { FindOptimalSplitDynamic(binaryFeatures, binaryFeatureCount, splits, parts, pCount, foldCount, result, resultSize, scoreFunction, l2, normalize, scoreStdDev, seed, stream); } } template <int BLOCK_SIZE, int HIST_COUNT> __global__ void GatherHistogramsByLeavesImpl(const int 
binFeatureCount, const float* histogram, const int histCount, const int leafCount, const int foldCount, float* result) { const int featuresPerBlock = BLOCK_SIZE / leafCount; const int featureId = blockIdx.x * featuresPerBlock + threadIdx.x / leafCount; const int leafId = threadIdx.x & (leafCount - 1); const int foldId = blockIdx.y; TPointwisePartOffsetsHelper helper(gridDim.y); if (featureId < binFeatureCount) { float leafVals[HIST_COUNT]; #pragma unroll for (int histId = 0; histId < HIST_COUNT; ++histId) { leafVals[histId] = LdgWithFallback(histogram, (featureId + (size_t)binFeatureCount * helper.GetHistogramOffset(leafId, foldId)) * HIST_COUNT + histId); } #pragma unroll for (int histId = 0; histId < HIST_COUNT; ++histId) { const ui64 idx = ((size_t)featureId * leafCount * foldCount + leafId * foldCount + foldId) * HIST_COUNT + histId; result[idx] = leafVals[histId]; } } } bool GatherHistogramByLeaves(const float* histogram, const ui32 binFeatureCount, const ui32 histCount, const ui32 leafCount, const ui32 foldCount, float* result, TCudaStream stream ) { const int blockSize = 1024; dim3 numBlocks; numBlocks.x = (binFeatureCount + (blockSize / leafCount) - 1) / (blockSize / leafCount); numBlocks.y = foldCount; numBlocks.z = 1; switch (histCount) { case 1: { GatherHistogramsByLeavesImpl<blockSize, 1> <<<numBlocks, blockSize, 0, stream>>>(binFeatureCount, histogram, histCount, leafCount, foldCount, result); return true; } case 2: { GatherHistogramsByLeavesImpl<blockSize, 2> <<<numBlocks, blockSize, 0, stream>>>(binFeatureCount, histogram, histCount, leafCount, foldCount, result); return true; } case 4: { GatherHistogramsByLeavesImpl<blockSize, 4> <<<numBlocks, blockSize, 0, stream>>>(binFeatureCount, histogram, histCount, leafCount, foldCount, result); return true; } default: { return false; } } } template <int BLOCK_SIZE> __global__ void PartitionUpdateImpl(const float* target, const float* weights, const float* counts, const struct TDataPartition* parts, struct TPartitionStatistics* partStats) { const int tid = threadIdx.x; parts += blockIdx.x; partStats += blockIdx.x; const int size = parts->Size; __shared__ volatile double localBuffer[BLOCK_SIZE]; double tmp = 0; if (weights != 0) { localBuffer[tid] = ComputeSum<BLOCK_SIZE>(weights + parts->Offset, size); __syncthreads(); tmp = Reduce<double, BLOCK_SIZE>(localBuffer); } if (tid == 0) { partStats->Weight = tmp; } tmp = 0; __syncthreads(); if (target != 0) { localBuffer[tid] = ComputeSum<BLOCK_SIZE>(target + parts->Offset, size); __syncthreads(); tmp = Reduce<double, BLOCK_SIZE>(localBuffer); } if (tid == 0) { partStats->Sum = tmp; } tmp = 0; __syncthreads(); if (counts != 0) { localBuffer[tid] = ComputeSum<BLOCK_SIZE>(counts + parts->Offset, size); __syncthreads(); tmp = Reduce<double, BLOCK_SIZE>(localBuffer); } else { tmp = size; } if (tid == 0) { partStats->Count = tmp; } } void UpdatePartitionProps(const float* target, const float* weights, const float* counts, const struct TDataPartition* parts, struct TPartitionStatistics* partStats, int partsCount, TCudaStream stream ) { const int blockSize = 1024; if (partsCount) { PartitionUpdateImpl<blockSize> << < partsCount, blockSize, 0, stream >> > (target, weights, counts, parts, partStats); } } }
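GatherHistogramByLeaves above turns the runtime histCount into a compile-time template argument through a switch over the supported values. A sketch of that dispatch idiom on a hypothetical kernel; scale_kernel and DispatchScale are illustrative names, not from the file:

#include <cstdio>
#include <cuda_runtime.h>

// Kernel specialised on a compile-time factor so the inner loop can unroll.
template <int FACTOR>
__global__ void scale_kernel(const float* in, float* out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    float v = in[i];
    #pragma unroll
    for (int k = 1; k < FACTOR; ++k) v += in[i];
    out[i] = v;  // equals in[i] * FACTOR
  }
}

// Runtime value -> compile-time specialisation, mirroring the histCount switch.
bool DispatchScale(const float* in, float* out, int n, int factor) {
  const int block = 256, grid = (n + block - 1) / block;
  switch (factor) {
    case 1: scale_kernel<1><<<grid, block>>>(in, out, n); return true;
    case 2: scale_kernel<2><<<grid, block>>>(in, out, n); return true;
    case 4: scale_kernel<4><<<grid, block>>>(in, out, n); return true;
    default: return false;  // unsupported value, like the default branch above
  }
}

int main() {
  const int n = 1024;
  float *d_in, *d_out;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  float* h = new float[n];
  for (int i = 0; i < n; ++i) h[i] = 1.0f;
  cudaMemcpy(d_in, h, n * sizeof(float), cudaMemcpyHostToDevice);
  bool ok = DispatchScale(d_in, d_out, n, 4);
  cudaMemcpy(h, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
  printf("dispatched: %d, out[0] = %f (expected 4.0)\n", ok ? 1 : 0, h[0]);
  delete[] h;
  cudaFree(d_in); cudaFree(d_out);
  return 0;
}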
a5e6b5c6e8466161ea23dc94f0997f434d2f6c7e.hip
// !!! This is a file automatically generated by hipify!!! /* Reference: https://x.momo86.net/en?p=113 */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <chrono> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #define NUM_OF_BLOCKS 1048576 #define NUM_OF_THREADS 256 __device__ half2 half_max(const half2 a, const half2 b) { const half2 sub = __hsub2(a, b); const unsigned sign = (*reinterpret_cast<const unsigned*>(&sub)) & 0x80008000u; const unsigned sw = 0x00003210 | (((sign >> 21) | (sign >> 13)) * 0x11); const unsigned int res = __byte_perm(*reinterpret_cast<const unsigned*>(&a), *reinterpret_cast<const unsigned*>(&b), sw); return *reinterpret_cast<const half2*>(&res); } __device__ half half_max(const half a, const half b) { const half sub = __hsub(a, b); const unsigned sign = (*reinterpret_cast<const short*>(&sub)) & 0x8000u; const unsigned sw = 0x00000010 | ((sign >> 13) * 0x11); const unsigned short res = __byte_perm(*reinterpret_cast<const short*>(&a), *reinterpret_cast<const short*>(&b), sw); return *reinterpret_cast<const half*>(&res); } template <typename T> __global__ void hmax(T const *__restrict__ const a, T const *__restrict__ const b, T *__restrict__ const r, const size_t size) { for (size_t i = threadIdx.x + blockDim.x * blockIdx.x; i < size; i += blockDim.x * gridDim.x) r[i] = half_max(a[i], b[i]); } void generateInput(half2 * a, size_t size) { for (size_t i = 0; i < size; ++i) { half2 temp; temp.x = static_cast<float>(rand() % 922021); temp.y = static_cast<float>(rand() % 922021); a[i] = temp; } } // compute the maximum of two values int main(int argc, char *argv[]) { if (argc != 2) { printf("Usage: %s <repeat>\n", argv[0]); return 1; } const int repeat = atoi(argv[1]); size_t size = (size_t)NUM_OF_BLOCKS * NUM_OF_THREADS; const size_t size_bytes = size * sizeof(half2); half2 * a, *b, *r; half2 * d_a, *d_b, *d_r; a = (half2*) malloc (size_bytes); b = (half2*) malloc (size_bytes); r = (half2*) malloc (size_bytes); hipMalloc((void**)&d_a, size_bytes); hipMalloc((void**)&d_b, size_bytes); hipMalloc((void**)&d_r, size_bytes); // initialize input values srand(123); generateInput(a, size); hipMemcpy(d_a, a, size_bytes, hipMemcpyHostToDevice); generateInput(b, size); hipMemcpy(d_b, b, size_bytes, hipMemcpyHostToDevice); for (int i = 0; i < repeat; i++) hipLaunchKernelGGL(( hmax<half2>), dim3(NUM_OF_BLOCKS), dim3(NUM_OF_THREADS), 0, 0, d_a, d_b, d_r, size); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); // run hmax2 for (int i = 0; i < repeat; i++) hipLaunchKernelGGL(( hmax<half2>), dim3(NUM_OF_BLOCKS), dim3(NUM_OF_THREADS), 0, 0, d_a, d_b, d_r, size); hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average kernel execution time %f (us)\n", (time * 1e-3f) / repeat); // verify hipMemcpy(r, d_r, size_bytes, hipMemcpyDeviceToHost); bool ok = true; for (size_t i = 0; i < size; ++i) { float2 fa = __half22float2(a[i]); float2 fb = __half22float2(b[i]); float2 fr = __half22float2(r[i]); float x = fmaxf(fa.x, fb.x); float y = fmaxf(fa.y, fb.y); if (fabsf(fr.x - x) > 1e-3 || fabsf(fr.y - y) > 1e-3) { ok = false; break; } } printf("fp16_hmax2 %s\n", ok ? 
"PASS" : "FAIL"); for (int i = 0; i < repeat; i++) hipLaunchKernelGGL(( hmax<half>), dim3(NUM_OF_BLOCKS), dim3(NUM_OF_THREADS), 0, 0, (half*)d_a, (half*)d_b, (half*)d_r, size*2); hipDeviceSynchronize(); start = std::chrono::steady_clock::now(); // run hmax (the size is doubled) for (int i = 0; i < repeat; i++) hipLaunchKernelGGL(( hmax<half>), dim3(NUM_OF_BLOCKS), dim3(NUM_OF_THREADS), 0, 0, (half*)d_a, (half*)d_b, (half*)d_r, size*2); hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average kernel execution time %f (us)\n", (time * 1e-3f) / repeat); hipMemcpy(r, d_r, size_bytes, hipMemcpyDeviceToHost); // verify ok = true; for (size_t i = 0; i < size; ++i) { float2 fa = __half22float2(a[i]); float2 fb = __half22float2(b[i]); float2 fr = __half22float2(r[i]); float x = fmaxf(fa.x, fb.x); float y = fmaxf(fa.y, fb.y); if (fabsf(fr.x - x) > 1e-3 || fabsf(fr.y - y) > 1e-3) { ok = false; break; } } printf("fp16_hmax %s\n", ok ? "PASS" : "FAIL"); hipFree(d_a); hipFree(d_b); hipFree(d_r); free(a); free(b); free(r); return EXIT_SUCCESS; }
a5e6b5c6e8466161ea23dc94f0997f434d2f6c7e.cu
/* Reference: https://x.momo86.net/en?p=113 */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <chrono> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #define NUM_OF_BLOCKS 1048576 #define NUM_OF_THREADS 256 __device__ half2 half_max(const half2 a, const half2 b) { const half2 sub = __hsub2(a, b); const unsigned sign = (*reinterpret_cast<const unsigned*>(&sub)) & 0x80008000u; const unsigned sw = 0x00003210 | (((sign >> 21) | (sign >> 13)) * 0x11); const unsigned int res = __byte_perm(*reinterpret_cast<const unsigned*>(&a), *reinterpret_cast<const unsigned*>(&b), sw); return *reinterpret_cast<const half2*>(&res); } __device__ half half_max(const half a, const half b) { const half sub = __hsub(a, b); const unsigned sign = (*reinterpret_cast<const short*>(&sub)) & 0x8000u; const unsigned sw = 0x00000010 | ((sign >> 13) * 0x11); const unsigned short res = __byte_perm(*reinterpret_cast<const short*>(&a), *reinterpret_cast<const short*>(&b), sw); return *reinterpret_cast<const half*>(&res); } template <typename T> __global__ void hmax(T const *__restrict__ const a, T const *__restrict__ const b, T *__restrict__ const r, const size_t size) { for (size_t i = threadIdx.x + blockDim.x * blockIdx.x; i < size; i += blockDim.x * gridDim.x) r[i] = half_max(a[i], b[i]); } void generateInput(half2 * a, size_t size) { for (size_t i = 0; i < size; ++i) { half2 temp; temp.x = static_cast<float>(rand() % 922021); temp.y = static_cast<float>(rand() % 922021); a[i] = temp; } } // compute the maximum of two values int main(int argc, char *argv[]) { if (argc != 2) { printf("Usage: %s <repeat>\n", argv[0]); return 1; } const int repeat = atoi(argv[1]); size_t size = (size_t)NUM_OF_BLOCKS * NUM_OF_THREADS; const size_t size_bytes = size * sizeof(half2); half2 * a, *b, *r; half2 * d_a, *d_b, *d_r; a = (half2*) malloc (size_bytes); b = (half2*) malloc (size_bytes); r = (half2*) malloc (size_bytes); hipMalloc((void**)&d_a, size_bytes); hipMalloc((void**)&d_b, size_bytes); hipMalloc((void**)&d_r, size_bytes); // initialize input values srand(123); generateInput(a, size); hipMemcpy(d_a, a, size_bytes, hipMemcpyHostToDevice); generateInput(b, size); hipMemcpy(d_b, b, size_bytes, hipMemcpyHostToDevice); for (int i = 0; i < repeat; i++) hmax<half2><<<NUM_OF_BLOCKS, NUM_OF_THREADS>>>( d_a, d_b, d_r, size); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); // run hmax2 for (int i = 0; i < repeat; i++) hmax<half2><<<NUM_OF_BLOCKS, NUM_OF_THREADS>>>( d_a, d_b, d_r, size); hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average kernel execution time %f (us)\n", (time * 1e-3f) / repeat); // verify hipMemcpy(r, d_r, size_bytes, hipMemcpyDeviceToHost); bool ok = true; for (size_t i = 0; i < size; ++i) { float2 fa = __half22float2(a[i]); float2 fb = __half22float2(b[i]); float2 fr = __half22float2(r[i]); float x = fmaxf(fa.x, fb.x); float y = fmaxf(fa.y, fb.y); if (fabsf(fr.x - x) > 1e-3 || fabsf(fr.y - y) > 1e-3) { ok = false; break; } } printf("fp16_hmax2 %s\n", ok ? 
"PASS" : "FAIL"); for (int i = 0; i < repeat; i++) hmax<half><<<NUM_OF_BLOCKS, NUM_OF_THREADS>>>( (half*)d_a, (half*)d_b, (half*)d_r, size*2); hipDeviceSynchronize(); start = std::chrono::steady_clock::now(); // run hmax (the size is doubled) for (int i = 0; i < repeat; i++) hmax<half><<<NUM_OF_BLOCKS, NUM_OF_THREADS>>>( (half*)d_a, (half*)d_b, (half*)d_r, size*2); hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); printf("Average kernel execution time %f (us)\n", (time * 1e-3f) / repeat); hipMemcpy(r, d_r, size_bytes, hipMemcpyDeviceToHost); // verify ok = true; for (size_t i = 0; i < size; ++i) { float2 fa = __half22float2(a[i]); float2 fb = __half22float2(b[i]); float2 fr = __half22float2(r[i]); float x = fmaxf(fa.x, fb.x); float y = fmaxf(fa.y, fb.y); if (fabsf(fr.x - x) > 1e-3 || fabsf(fr.y - y) > 1e-3) { ok = false; break; } } printf("fp16_hmax %s\n", ok ? "PASS" : "FAIL"); hipFree(d_a); hipFree(d_b); hipFree(d_r); free(a); free(b); free(r); return EXIT_SUCCESS; }
45ee3c13391102fbb024e2dd43b7659b56885340.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void findCentroidsAtomicFreeLocal_64(int afLocal, int* responses, int nPixels, int* cluster, int* centroidMass, unsigned int* centroidCount) { int const af_id = blockIdx.x; int const cluster_id = blockIdx.y; int const filter_id = threadIdx.x; int* filter_responses = &responses[filter_id*nPixels]; int local_responses = 0; int local_count = 0; int pixel_start = af_id*afLocal; int pixel_end = (af_id+1)*afLocal; pixel_end = pixel_end>nPixels?nPixels:pixel_end; for (int i=pixel_start; i<pixel_end; i++) { if (cluster[i] == cluster_id) { local_responses += filter_responses[i]; local_count++; } } int idx = af_id * gridDim.y*blockDim.x + filter_id*64 + cluster_id; centroidMass[idx] = local_responses; centroidCount[idx] = local_count; }
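findCentroidsAtomicFreeLocal_64 above only writes per-chunk partial sums and counts; a second pass still has to reduce them across chunks and divide by the counts. A hypothetical follow-up kernel sketching that reduction, assuming the af_id * 64 * nFilters + filter_id * 64 + cluster_id layout written above (reduceCentroids_64 and all host names are mine):

#include <cstdio>
#include <cuda_runtime.h>

// One thread per (filter, cluster) pair; clusters fixed at 64 as in the
// kernel above, nChunks = number of af chunks (gridDim.x of the first pass).
__global__ void reduceCentroids_64(const int* centroidMass,
                                   const unsigned int* centroidCount,
                                   int nChunks, int nFilters,
                                   float* centroids /* nFilters x 64 */) {
  int filter_id  = blockIdx.x;
  int cluster_id = threadIdx.x;           // launched with blockDim.x == 64
  if (filter_id >= nFilters || cluster_id >= 64) return;

  long long mass = 0;
  unsigned long long count = 0;
  for (int chunk = 0; chunk < nChunks; ++chunk) {
    int idx = chunk * 64 * nFilters + filter_id * 64 + cluster_id;
    mass  += centroidMass[idx];
    count += centroidCount[idx];
  }
  centroids[filter_id * 64 + cluster_id] =
      count ? static_cast<float>(mass) / static_cast<float>(count) : 0.0f;
}

int main() {
  const int nFilters = 8, nChunks = 16, nClusters = 64;
  const size_t cells = (size_t)nChunks * nFilters * nClusters;
  int* d_mass; unsigned int* d_count; float* d_centroids;
  cudaMalloc(&d_mass, cells * sizeof(int));
  cudaMalloc(&d_count, cells * sizeof(unsigned int));
  cudaMalloc(&d_centroids, nFilters * nClusters * sizeof(float));
  cudaMemset(d_mass, 0, cells * sizeof(int));
  cudaMemset(d_count, 0, cells * sizeof(unsigned int));
  reduceCentroids_64<<<nFilters, nClusters>>>(d_mass, d_count, nChunks, nFilters, d_centroids);
  cudaDeviceSynchronize();
  printf("done\n");
  cudaFree(d_mass); cudaFree(d_count); cudaFree(d_centroids);
  return 0;
}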
45ee3c13391102fbb024e2dd43b7659b56885340.cu
#include "includes.h" __global__ void findCentroidsAtomicFreeLocal_64(int afLocal, int* responses, int nPixels, int* cluster, int* centroidMass, unsigned int* centroidCount) { int const af_id = blockIdx.x; int const cluster_id = blockIdx.y; int const filter_id = threadIdx.x; int* filter_responses = &responses[filter_id*nPixels]; int local_responses = 0; int local_count = 0; int pixel_start = af_id*afLocal; int pixel_end = (af_id+1)*afLocal; pixel_end = pixel_end>nPixels?nPixels:pixel_end; for (int i=pixel_start; i<pixel_end; i++) { if (cluster[i] == cluster_id) { local_responses += filter_responses[i]; local_count++; } } int idx = af_id * gridDim.y*blockDim.x + filter_id*64 + cluster_id; centroidMass[idx] = local_responses; centroidCount[idx] = local_count; }
fe86b279d811abf9759cd547729ccf9d3e58f6bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <iostream> #include <cmath> #define SIZE 3 #define BLOCK_SIZE 2 __global__ void matrix_multiplication(float* d_prod, float* d_m, float* d_n, int size) { int col = threadIdx.x + blockDim.x*blockIdx.x; int row = threadIdx.y + blockDim.y*blockIdx.y; if(row < size && col < size) { float val = 0.0f; for(int k = 0; k < size; ++k) { val += d_m[k+row*size]*d_n[k*size+col]; } d_prod[col+row*size] = val; } } int main() { float* h_m; float* h_n; float* h_prod; float* d_m; float* d_n; float* d_prod; size_t size = SIZE*SIZE; h_m = new float[size]; h_n = new float[size]; h_prod = new float[size]; size_t bytes = size*sizeof(float); hipMalloc((void **) &d_m, bytes); hipMalloc((void **) &d_n, bytes); hipMalloc((void **) &d_prod, bytes); for(size_t i = 0; i < size; ++i) { h_m[i] = i*i; h_n[i] = i; } hipMemcpy(d_m, h_m, bytes, hipMemcpyHostToDevice); hipMemcpy(d_n, h_n, bytes, hipMemcpyHostToDevice); // Kernel call /* std::cout<<ceil(static_cast<float>(SIZE)/static_cast<float>(BLOCK_SIZE))<<std::endl; */ int block_size = ceil(static_cast<float>(SIZE)/static_cast<float>(BLOCK_SIZE)); dim3 nblocks(block_size, block_size, 1); dim3 nthreads(BLOCK_SIZE, BLOCK_SIZE, 1); hipLaunchKernelGGL(( matrix_multiplication), dim3(nblocks), dim3(nthreads), 0, 0, d_prod, d_m, d_n, SIZE); hipMemcpy(h_prod, d_prod, bytes, hipMemcpyDeviceToHost); for(size_t i = 0; i < size; ++i) { if(i % SIZE == 0) std::cout<<std::endl; std::cout<<h_prod[i]<<" "; } hipFree(d_m); hipFree(d_n); hipFree(d_prod); delete[] h_m; delete[] h_n; delete[] h_prod; return 0; }
fe86b279d811abf9759cd547729ccf9d3e58f6bf.cu
#include <cstdio> #include <iostream> #include <cmath> #define SIZE 3 #define BLOCK_SIZE 2 __global__ void matrix_multiplication(float* d_prod, float* d_m, float* d_n, int size) { int col = threadIdx.x + blockDim.x*blockIdx.x; int row = threadIdx.y + blockDim.y*blockIdx.y; if(row < size && col < size) { float val = 0.0f; for(int k = 0; k < size; ++k) { val += d_m[k+row*size]*d_n[k*size+col]; } d_prod[col+row*size] = val; } } int main() { float* h_m; float* h_n; float* h_prod; float* d_m; float* d_n; float* d_prod; size_t size = SIZE*SIZE; h_m = new float[size]; h_n = new float[size]; h_prod = new float[size]; size_t bytes = size*sizeof(float); cudaMalloc((void **) &d_m, bytes); cudaMalloc((void **) &d_n, bytes); cudaMalloc((void **) &d_prod, bytes); for(size_t i = 0; i < size; ++i) { h_m[i] = i*i; h_n[i] = i; } cudaMemcpy(d_m, h_m, bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_n, h_n, bytes, cudaMemcpyHostToDevice); // Kernel call /* std::cout<<ceil(static_cast<float>(SIZE)/static_cast<float>(BLOCK_SIZE))<<std::endl; */ int block_size = ceil(static_cast<float>(SIZE)/static_cast<float>(BLOCK_SIZE)); dim3 nblocks(block_size, block_size, 1); dim3 nthreads(BLOCK_SIZE, BLOCK_SIZE, 1); matrix_multiplication<<<nblocks, nthreads>>>(d_prod, d_m, d_n, SIZE); cudaMemcpy(h_prod, d_prod, bytes, cudaMemcpyDeviceToHost); for(size_t i = 0; i < size; ++i) { if(i % SIZE == 0) std::cout<<std::endl; std::cout<<h_prod[i]<<" "; } cudaFree(d_m); cudaFree(d_n); cudaFree(d_prod); delete[] h_m; delete[] h_n; delete[] h_prod; return 0; }
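The driver above fills m with i*i and n with i and then prints only the GPU result, so there is nothing in the file to compare it against. The standalone host-only snippet below (not part of the original file) recomputes the same 3x3 product with the same row-major indexing the kernel uses, which gives a reference to check the printed values against.

#include <cstdio>

int main() {
  const int N = 3;  // matches SIZE in the program above
  float m[N * N], n[N * N], ref[N * N];
  for (int i = 0; i < N * N; ++i) { m[i] = (float)(i * i); n[i] = (float)i; }
  // ref = m * n, row-major, same inner product as the kernel computes per element
  for (int r = 0; r < N; ++r)
    for (int c = 0; c < N; ++c) {
      float v = 0.0f;
      for (int k = 0; k < N; ++k) v += m[r * N + k] * n[k * N + c];
      ref[r * N + c] = v;
    }
  for (int r = 0; r < N; ++r) {
    for (int c = 0; c < N; ++c) printf("%g ", ref[r * N + c]);
    printf("\n");
  }
  return 0;
}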
0cd7ce719092c8897736bca84cf4984a2bbbf216.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> /* pow() */ #include <cstdint> /* uint64_t */ #include <ctime> /* time() */ #include <cstdlib> #include <unistd.h> #include <iostream> using namespace std; #include <ctime> /* time() */ #include <sys/time.h> #include <stdlib.h> #include <iostream> #include <cstdint> /* int64_t, uint64_t */ void printVec(uint64_t *vec, uint64_t n){ std::cout << "["; for(uint64_t i = 0; i < n; i++){ std::cout << vec[i] << ","; } std::cout << "]" << std::endl; } __global__ void bit_reverse_gpu(uint64_t *vec, uint64_t *result, int *indices, uint64_t n, uint64_t batch){ int batch_id = blockIdx.x; // one block (with n threads) handles one vector if possible int j = threadIdx.x; int blockdim = blockDim.x; if(blockDim.x == n){ // one block (with n threads) handles one vector // we have #batch blocks // eg. n=16, batch=4 <=> there're 4 blocks, blockDim = 16 result[ batch_id*blockdim + indices[j] ] = vec[ batch_id*blockdim + j]; } else if(blockDim.x < n){ int k = n / (blockDim.x); // eg: n=2048 while blockDim=1024, so 2 blocks handle one vector (vec seperated into 2 parts) int vec_part = blockIdx.x % k; result[ (batch_id/k)*n + indices[vec_part*blockdim + j] ] = vec[ batch_id*blockdim + j]; } } __host__ uint64_t * bit_reverse_table(uint64_t *vec, uint64_t n, uint64_t batch){ int size = n*batch * sizeof(uint64_t); int get_indices1[] = {0}; int get_indices2[] = {0, 1}; int get_indices4[] = {0, 2, 1, 3}; int get_indices8[] = {0, 4, 2, 6, 1, 5, 3, 7}; int get_indices16[] = {0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15}; int get_indices32[] = {0, 16, 8, 24, 4, 20, 12, 28, 2, 18, 10, 26, 6, 22, 14, 30, 1, 17, 9, 25, 5, 21, 13, 29, 3, 19, 11, 27, 7, 23, 15, 31}; int get_indices64[] = {0, 32, 16, 48, 8, 40, 24, 56, 4, 36, 20, 52, 12, 44, 28, 60, 2, 34, 18, 50, 10, 42, 26, 58, 6, 38, 22, 54, 14, 46, 30, 62, 1, 33, 17, 49, 9, 41, 25, 57, 5, 37, 21, 53, 13, 45, 29, 61, 3, 35, 19, 51, 11, 43, 27, 59, 7, 39, 23, 55, 15, 47, 31, 63}; int get_indices128[] = {0, 64, 32, 96, 16, 80, 48, 112, 8, 72, 40, 104, 24, 88, 56, 120, 4, 68, 36, 100, 20, 84, 52, 116, 12, 76, 44, 108, 28, 92, 60, 124, 2, 66, 34, 98, 18, 82, 50, 114, 10, 74, 42, 106, 26, 90, 58, 122, 6, 70, 38, 102, 22, 86, 54, 118, 14, 78, 46, 110, 30, 94, 62, 126, 1, 65, 33, 97, 17, 81, 49, 113, 9, 73, 41, 105, 25, 89, 57, 121, 5, 69, 37, 101, 21, 85, 53, 117, 13, 77, 45, 109, 29, 93, 61, 125, 3, 67, 35, 99, 19, 83, 51, 115, 11, 75, 43, 107, 27, 91, 59, 123, 7, 71, 39, 103, 23, 87, 55, 119, 15, 79, 47, 111, 31, 95, 63, 127}; int get_indices256[] = {0, 128, 64, 192, 32, 160, 96, 224, 16, 144, 80, 208, 48, 176, 112, 240, 8, 136, 72, 200, 40, 168, 104, 232, 24, 152, 88, 216, 56, 184, 120, 248, 4, 132, 68, 196, 36, 164, 100, 228, 20, 148, 84, 212, 52, 180, 116, 244, 12, 140, 76, 204, 44, 172, 108, 236, 28, 156, 92, 220, 60, 188, 124, 252, 2, 130, 66, 194, 34, 162, 98, 226, 18, 146, 82, 210, 50, 178, 114, 242, 10, 138, 74, 202, 42, 170, 106, 234, 26, 154, 90, 218, 58, 186, 122, 250, 6, 134, 70, 198, 38, 166, 102, 230, 22, 150, 86, 214, 54, 182, 118, 246, 14, 142, 78, 206, 46, 174, 110, 238, 30, 158, 94, 222, 62, 190, 126, 254, 1, 129, 65, 193, 33, 161, 97, 225, 17, 145, 81, 209, 49, 177, 113, 241, 9, 137, 73, 201, 41, 169, 105, 233, 25, 153, 89, 217, 57, 185, 121, 249, 5, 133, 69, 197, 37, 165, 101, 229, 21, 149, 85, 213, 53, 181, 117, 245, 13, 141, 77, 205, 45, 173, 109, 237, 29, 157, 93, 221, 61, 189, 125, 253, 3, 131, 67, 195, 35, 163, 99, 227, 19, 147, 83, 211, 51, 
179, 115, 243, 11, 139, 75, 203, 43, 171, 107, 235, 27, 155, 91, 219, 59, 187, 123, 251, 7, 135, 71, 199, 39, 167, 103, 231, 23, 151, 87, 215, 55, 183, 119, 247, 15, 143, 79, 207, 47, 175, 111, 239, 31, 159, 95, 223, 63, 191, 127, 255}; int get_indices512[] = {0, 256, 128, 384, 64, 320, 192, 448, 32, 288, 160, 416, 96, 352, 224, 480, 16, 272, 144, 400, 80, 336, 208, 464, 48, 304, 176, 432, 112, 368, 240, 496, 8, 264, 136, 392, 72, 328, 200, 456, 40, 296, 168, 424, 104, 360, 232, 488, 24, 280, 152, 408, 88, 344, 216, 472, 56, 312, 184, 440, 120, 376, 248, 504, 4, 260, 132, 388, 68, 324, 196, 452, 36, 292, 164, 420, 100, 356, 228, 484, 20, 276, 148, 404, 84, 340, 212, 468, 52, 308, 180, 436, 116, 372, 244, 500, 12, 268, 140, 396, 76, 332, 204, 460, 44, 300, 172, 428, 108, 364, 236, 492, 28, 284, 156, 412, 92, 348, 220, 476, 60, 316, 188, 444, 124, 380, 252, 508, 2, 258, 130, 386, 66, 322, 194, 450, 34, 290, 162, 418, 98, 354, 226, 482, 18, 274, 146, 402, 82, 338, 210, 466, 50, 306, 178, 434, 114, 370, 242, 498, 10, 266, 138, 394, 74, 330, 202, 458, 42, 298, 170, 426, 106, 362, 234, 490, 26, 282, 154, 410, 90, 346, 218, 474, 58, 314, 186, 442, 122, 378, 250, 506, 6, 262, 134, 390, 70, 326, 198, 454, 38, 294, 166, 422, 102, 358, 230, 486, 22, 278, 150, 406, 86, 342, 214, 470, 54, 310, 182, 438, 118, 374, 246, 502, 14, 270, 142, 398, 78, 334, 206, 462, 46, 302, 174, 430, 110, 366, 238, 494, 30, 286, 158, 414, 94, 350, 222, 478, 62, 318, 190, 446, 126, 382, 254, 510, 1, 257, 129, 385, 65, 321, 193, 449, 33, 289, 161, 417, 97, 353, 225, 481, 17, 273, 145, 401, 81, 337, 209, 465, 49, 305, 177, 433, 113, 369, 241, 497, 9, 265, 137, 393, 73, 329, 201, 457, 41, 297, 169, 425, 105, 361, 233, 489, 25, 281, 153, 409, 89, 345, 217, 473, 57, 313, 185, 441, 121, 377, 249, 505, 5, 261, 133, 389, 69, 325, 197, 453, 37, 293, 165, 421, 101, 357, 229, 485, 21, 277, 149, 405, 85, 341, 213, 469, 53, 309, 181, 437, 117, 373, 245, 501, 13, 269, 141, 397, 77, 333, 205, 461, 45, 301, 173, 429, 109, 365, 237, 493, 29, 285, 157, 413, 93, 349, 221, 477, 61, 317, 189, 445, 125, 381, 253, 509, 3, 259, 131, 387, 67, 323, 195, 451, 35, 291, 163, 419, 99, 355, 227, 483, 19, 275, 147, 403, 83, 339, 211, 467, 51, 307, 179, 435, 115, 371, 243, 499, 11, 267, 139, 395, 75, 331, 203, 459, 43, 299, 171, 427, 107, 363, 235, 491, 27, 283, 155, 411, 91, 347, 219, 475, 59, 315, 187, 443, 123, 379, 251, 507, 7, 263, 135, 391, 71, 327, 199, 455, 39, 295, 167, 423, 103, 359, 231, 487, 23, 279, 151, 407, 87, 343, 215, 471, 55, 311, 183, 439, 119, 375, 247, 503, 15, 271, 143, 399, 79, 335, 207, 463, 47, 303, 175, 431, 111, 367, 239, 495, 31, 287, 159, 415, 95, 351, 223, 479, 63, 319, 191, 447, 127, 383, 255, 511}; int get_indices1024[] = {0, 512, 256, 768, 128, 640, 384, 896, 64, 576, 320, 832, 192, 704, 448, 960, 32, 544, 288, 800, 160, 672, 416, 928, 96, 608, 352, 864, 224, 736, 480, 992, 16, 528, 272, 784, 144, 656, 400, 912, 80, 592, 336, 848, 208, 720, 464, 976, 48, 560, 304, 816, 176, 688, 432, 944, 112, 624, 368, 880, 240, 752, 496, 1008, 8, 520, 264, 776, 136, 648, 392, 904, 72, 584, 328, 840, 200, 712, 456, 968, 40, 552, 296, 808, 168, 680, 424, 936, 104, 616, 360, 872, 232, 744, 488, 1000, 24, 536, 280, 792, 152, 664, 408, 920, 88, 600, 344, 856, 216, 728, 472, 984, 56, 568, 312, 824, 184, 696, 440, 952, 120, 632, 376, 888, 248, 760, 504, 1016, 4, 516, 260, 772, 132, 644, 388, 900, 68, 580, 324, 836, 196, 708, 452, 964, 36, 548, 292, 804, 164, 676, 420, 932, 100, 612, 356, 868, 228, 740, 484, 996, 20, 532, 276, 788, 148, 660, 404, 
916, 84, 596, 340, 852, 212, 724, 468, 980, 52, 564, 308, 820, 180, 692, 436, 948, 116, 628, 372, 884, 244, 756, 500, 1012, 12, 524, 268, 780, 140, 652, 396, 908, 76, 588, 332, 844, 204, 716, 460, 972, 44, 556, 300, 812, 172, 684, 428, 940, 108, 620, 364, 876, 236, 748, 492, 1004, 28, 540, 284, 796, 156, 668, 412, 924, 92, 604, 348, 860, 220, 732, 476, 988, 60, 572, 316, 828, 188, 700, 444, 956, 124, 636, 380, 892, 252, 764, 508, 1020, 2, 514, 258, 770, 130, 642, 386, 898, 66, 578, 322, 834, 194, 706, 450, 962, 34, 546, 290, 802, 162, 674, 418, 930, 98, 610, 354, 866, 226, 738, 482, 994, 18, 530, 274, 786, 146, 658, 402, 914, 82, 594, 338, 850, 210, 722, 466, 978, 50, 562, 306, 818, 178, 690, 434, 946, 114, 626, 370, 882, 242, 754, 498, 1010, 10, 522, 266, 778, 138, 650, 394, 906, 74, 586, 330, 842, 202, 714, 458, 970, 42, 554, 298, 810, 170, 682, 426, 938, 106, 618, 362, 874, 234, 746, 490, 1002, 26, 538, 282, 794, 154, 666, 410, 922, 90, 602, 346, 858, 218, 730, 474, 986, 58, 570, 314, 826, 186, 698, 442, 954, 122, 634, 378, 890, 250, 762, 506, 1018, 6, 518, 262, 774, 134, 646, 390, 902, 70, 582, 326, 838, 198, 710, 454, 966, 38, 550, 294, 806, 166, 678, 422, 934, 102, 614, 358, 870, 230, 742, 486, 998, 22, 534, 278, 790, 150, 662, 406, 918, 86, 598, 342, 854, 214, 726, 470, 982, 54, 566, 310, 822, 182, 694, 438, 950, 118, 630, 374, 886, 246, 758, 502, 1014, 14, 526, 270, 782, 142, 654, 398, 910, 78, 590, 334, 846, 206, 718, 462, 974, 46, 558, 302, 814, 174, 686, 430, 942, 110, 622, 366, 878, 238, 750, 494, 1006, 30, 542, 286, 798, 158, 670, 414, 926, 94, 606, 350, 862, 222, 734, 478, 990, 62, 574, 318, 830, 190, 702, 446, 958, 126, 638, 382, 894, 254, 766, 510, 1022, 1, 513, 257, 769, 129, 641, 385, 897, 65, 577, 321, 833, 193, 705, 449, 961, 33, 545, 289, 801, 161, 673, 417, 929, 97, 609, 353, 865, 225, 737, 481, 993, 17, 529, 273, 785, 145, 657, 401, 913, 81, 593, 337, 849, 209, 721, 465, 977, 49, 561, 305, 817, 177, 689, 433, 945, 113, 625, 369, 881, 241, 753, 497, 1009, 9, 521, 265, 777, 137, 649, 393, 905, 73, 585, 329, 841, 201, 713, 457, 969, 41, 553, 297, 809, 169, 681, 425, 937, 105, 617, 361, 873, 233, 745, 489, 1001, 25, 537, 281, 793, 153, 665, 409, 921, 89, 601, 345, 857, 217, 729, 473, 985, 57, 569, 313, 825, 185, 697, 441, 953, 121, 633, 377, 889, 249, 761, 505, 1017, 5, 517, 261, 773, 133, 645, 389, 901, 69, 581, 325, 837, 197, 709, 453, 965, 37, 549, 293, 805, 165, 677, 421, 933, 101, 613, 357, 869, 229, 741, 485, 997, 21, 533, 277, 789, 149, 661, 405, 917, 85, 597, 341, 853, 213, 725, 469, 981, 53, 565, 309, 821, 181, 693, 437, 949, 117, 629, 373, 885, 245, 757, 501, 1013, 13, 525, 269, 781, 141, 653, 397, 909, 77, 589, 333, 845, 205, 717, 461, 973, 45, 557, 301, 813, 173, 685, 429, 941, 109, 621, 365, 877, 237, 749, 493, 1005, 29, 541, 285, 797, 157, 669, 413, 925, 93, 605, 349, 861, 221, 733, 477, 989, 61, 573, 317, 829, 189, 701, 445, 957, 125, 637, 381, 893, 253, 765, 509, 1021, 3, 515, 259, 771, 131, 643, 387, 899, 67, 579, 323, 835, 195, 707, 451, 963, 35, 547, 291, 803, 163, 675, 419, 931, 99, 611, 355, 867, 227, 739, 483, 995, 19, 531, 275, 787, 147, 659, 403, 915, 83, 595, 339, 851, 211, 723, 467, 979, 51, 563, 307, 819, 179, 691, 435, 947, 115, 627, 371, 883, 243, 755, 499, 1011, 11, 523, 267, 779, 139, 651, 395, 907, 75, 587, 331, 843, 203, 715, 459, 971, 43, 555, 299, 811, 171, 683, 427, 939, 107, 619, 363, 875, 235, 747, 491, 1003, 27, 539, 283, 795, 155, 667, 411, 923, 91, 603, 347, 859, 219, 731, 475, 987, 59, 571, 315, 827, 187, 699, 443, 955, 123, 635, 
379, 891, 251, 763, 507, 1019, 7, 519, 263, 775, 135, 647, 391, 903, 71, 583, 327, 839, 199, 711, 455, 967, 39, 551, 295, 807, 167, 679, 423, 935, 103, 615, 359, 871, 231, 743, 487, 999, 23, 535, 279, 791, 151, 663, 407, 919, 87, 599, 343, 855, 215, 727, 471, 983, 55, 567, 311, 823, 183, 695, 439, 951, 119, 631, 375, 887, 247, 759, 503, 1015, 15, 527, 271, 783, 143, 655, 399, 911, 79, 591, 335, 847, 207, 719, 463, 975, 47, 559, 303, 815, 175, 687, 431, 943, 111, 623, 367, 879, 239, 751, 495, 1007, 31, 543, 287, 799, 159, 671, 415, 927, 95, 607, 351, 863, 223, 735, 479, 991, 63, 575, 319, 831, 191, 703, 447, 959, 127, 639, 383, 895, 255, 767, 511, 1023}; int get_indices2048[] = {0, 1024, 512, 1536, 256, 1280, 768, 1792, 128, 1152, 640, 1664, 384, 1408, 896, 1920, 64, 1088, 576, 1600, 320, 1344, 832, 1856, 192, 1216, 704, 1728, 448, 1472, 960, 1984, 32, 1056, 544, 1568, 288, 1312, 800, 1824, 160, 1184, 672, 1696, 416, 1440, 928, 1952, 96, 1120, 608, 1632, 352, 1376, 864, 1888, 224, 1248, 736, 1760, 480, 1504, 992, 2016, 16, 1040, 528, 1552, 272, 1296, 784, 1808, 144, 1168, 656, 1680, 400, 1424, 912, 1936, 80, 1104, 592, 1616, 336, 1360, 848, 1872, 208, 1232, 720, 1744, 464, 1488, 976, 2000, 48, 1072, 560, 1584, 304, 1328, 816, 1840, 176, 1200, 688, 1712, 432, 1456, 944, 1968, 112, 1136, 624, 1648, 368, 1392, 880, 1904, 240, 1264, 752, 1776, 496, 1520, 1008, 2032, 8, 1032, 520, 1544, 264, 1288, 776, 1800, 136, 1160, 648, 1672, 392, 1416, 904, 1928, 72, 1096, 584, 1608, 328, 1352, 840, 1864, 200, 1224, 712, 1736, 456, 1480, 968, 1992, 40, 1064, 552, 1576, 296, 1320, 808, 1832, 168, 1192, 680, 1704, 424, 1448, 936, 1960, 104, 1128, 616, 1640, 360, 1384, 872, 1896, 232, 1256, 744, 1768, 488, 1512, 1000, 2024, 24, 1048, 536, 1560, 280, 1304, 792, 1816, 152, 1176, 664, 1688, 408, 1432, 920, 1944, 88, 1112, 600, 1624, 344, 1368, 856, 1880, 216, 1240, 728, 1752, 472, 1496, 984, 2008, 56, 1080, 568, 1592, 312, 1336, 824, 1848, 184, 1208, 696, 1720, 440, 1464, 952, 1976, 120, 1144, 632, 1656, 376, 1400, 888, 1912, 248, 1272, 760, 1784, 504, 1528, 1016, 2040, 4, 1028, 516, 1540, 260, 1284, 772, 1796, 132, 1156, 644, 1668, 388, 1412, 900, 1924, 68, 1092, 580, 1604, 324, 1348, 836, 1860, 196, 1220, 708, 1732, 452, 1476, 964, 1988, 36, 1060, 548, 1572, 292, 1316, 804, 1828, 164, 1188, 676, 1700, 420, 1444, 932, 1956, 100, 1124, 612, 1636, 356, 1380, 868, 1892, 228, 1252, 740, 1764, 484, 1508, 996, 2020, 20, 1044, 532, 1556, 276, 1300, 788, 1812, 148, 1172, 660, 1684, 404, 1428, 916, 1940, 84, 1108, 596, 1620, 340, 1364, 852, 1876, 212, 1236, 724, 1748, 468, 1492, 980, 2004, 52, 1076, 564, 1588, 308, 1332, 820, 1844, 180, 1204, 692, 1716, 436, 1460, 948, 1972, 116, 1140, 628, 1652, 372, 1396, 884, 1908, 244, 1268, 756, 1780, 500, 1524, 1012, 2036, 12, 1036, 524, 1548, 268, 1292, 780, 1804, 140, 1164, 652, 1676, 396, 1420, 908, 1932, 76, 1100, 588, 1612, 332, 1356, 844, 1868, 204, 1228, 716, 1740, 460, 1484, 972, 1996, 44, 1068, 556, 1580, 300, 1324, 812, 1836, 172, 1196, 684, 1708, 428, 1452, 940, 1964, 108, 1132, 620, 1644, 364, 1388, 876, 1900, 236, 1260, 748, 1772, 492, 1516, 1004, 2028, 28, 1052, 540, 1564, 284, 1308, 796, 1820, 156, 1180, 668, 1692, 412, 1436, 924, 1948, 92, 1116, 604, 1628, 348, 1372, 860, 1884, 220, 1244, 732, 1756, 476, 1500, 988, 2012, 60, 1084, 572, 1596, 316, 1340, 828, 1852, 188, 1212, 700, 1724, 444, 1468, 956, 1980, 124, 1148, 636, 1660, 380, 1404, 892, 1916, 252, 1276, 764, 1788, 508, 1532, 1020, 2044, 2, 1026, 514, 1538, 258, 1282, 770, 1794, 130, 1154, 642, 1666, 386, 
1410, 898, 1922, 66, 1090, 578, 1602, 322, 1346, 834, 1858, 194, 1218, 706, 1730, 450, 1474, 962, 1986, 34, 1058, 546, 1570, 290, 1314, 802, 1826, 162, 1186, 674, 1698, 418, 1442, 930, 1954, 98, 1122, 610, 1634, 354, 1378, 866, 1890, 226, 1250, 738, 1762, 482, 1506, 994, 2018, 18, 1042, 530, 1554, 274, 1298, 786, 1810, 146, 1170, 658, 1682, 402, 1426, 914, 1938, 82, 1106, 594, 1618, 338, 1362, 850, 1874, 210, 1234, 722, 1746, 466, 1490, 978, 2002, 50, 1074, 562, 1586, 306, 1330, 818, 1842, 178, 1202, 690, 1714, 434, 1458, 946, 1970, 114, 1138, 626, 1650, 370, 1394, 882, 1906, 242, 1266, 754, 1778, 498, 1522, 1010, 2034, 10, 1034, 522, 1546, 266, 1290, 778, 1802, 138, 1162, 650, 1674, 394, 1418, 906, 1930, 74, 1098, 586, 1610, 330, 1354, 842, 1866, 202, 1226, 714, 1738, 458, 1482, 970, 1994, 42, 1066, 554, 1578, 298, 1322, 810, 1834, 170, 1194, 682, 1706, 426, 1450, 938, 1962, 106, 1130, 618, 1642, 362, 1386, 874, 1898, 234, 1258, 746, 1770, 490, 1514, 1002, 2026, 26, 1050, 538, 1562, 282, 1306, 794, 1818, 154, 1178, 666, 1690, 410, 1434, 922, 1946, 90, 1114, 602, 1626, 346, 1370, 858, 1882, 218, 1242, 730, 1754, 474, 1498, 986, 2010, 58, 1082, 570, 1594, 314, 1338, 826, 1850, 186, 1210, 698, 1722, 442, 1466, 954, 1978, 122, 1146, 634, 1658, 378, 1402, 890, 1914, 250, 1274, 762, 1786, 506, 1530, 1018, 2042, 6, 1030, 518, 1542, 262, 1286, 774, 1798, 134, 1158, 646, 1670, 390, 1414, 902, 1926, 70, 1094, 582, 1606, 326, 1350, 838, 1862, 198, 1222, 710, 1734, 454, 1478, 966, 1990, 38, 1062, 550, 1574, 294, 1318, 806, 1830, 166, 1190, 678, 1702, 422, 1446, 934, 1958, 102, 1126, 614, 1638, 358, 1382, 870, 1894, 230, 1254, 742, 1766, 486, 1510, 998, 2022, 22, 1046, 534, 1558, 278, 1302, 790, 1814, 150, 1174, 662, 1686, 406, 1430, 918, 1942, 86, 1110, 598, 1622, 342, 1366, 854, 1878, 214, 1238, 726, 1750, 470, 1494, 982, 2006, 54, 1078, 566, 1590, 310, 1334, 822, 1846, 182, 1206, 694, 1718, 438, 1462, 950, 1974, 118, 1142, 630, 1654, 374, 1398, 886, 1910, 246, 1270, 758, 1782, 502, 1526, 1014, 2038, 14, 1038, 526, 1550, 270, 1294, 782, 1806, 142, 1166, 654, 1678, 398, 1422, 910, 1934, 78, 1102, 590, 1614, 334, 1358, 846, 1870, 206, 1230, 718, 1742, 462, 1486, 974, 1998, 46, 1070, 558, 1582, 302, 1326, 814, 1838, 174, 1198, 686, 1710, 430, 1454, 942, 1966, 110, 1134, 622, 1646, 366, 1390, 878, 1902, 238, 1262, 750, 1774, 494, 1518, 1006, 2030, 30, 1054, 542, 1566, 286, 1310, 798, 1822, 158, 1182, 670, 1694, 414, 1438, 926, 1950, 94, 1118, 606, 1630, 350, 1374, 862, 1886, 222, 1246, 734, 1758, 478, 1502, 990, 2014, 62, 1086, 574, 1598, 318, 1342, 830, 1854, 190, 1214, 702, 1726, 446, 1470, 958, 1982, 126, 1150, 638, 1662, 382, 1406, 894, 1918, 254, 1278, 766, 1790, 510, 1534, 1022, 2046, 1, 1025, 513, 1537, 257, 1281, 769, 1793, 129, 1153, 641, 1665, 385, 1409, 897, 1921, 65, 1089, 577, 1601, 321, 1345, 833, 1857, 193, 1217, 705, 1729, 449, 1473, 961, 1985, 33, 1057, 545, 1569, 289, 1313, 801, 1825, 161, 1185, 673, 1697, 417, 1441, 929, 1953, 97, 1121, 609, 1633, 353, 1377, 865, 1889, 225, 1249, 737, 1761, 481, 1505, 993, 2017, 17, 1041, 529, 1553, 273, 1297, 785, 1809, 145, 1169, 657, 1681, 401, 1425, 913, 1937, 81, 1105, 593, 1617, 337, 1361, 849, 1873, 209, 1233, 721, 1745, 465, 1489, 977, 2001, 49, 1073, 561, 1585, 305, 1329, 817, 1841, 177, 1201, 689, 1713, 433, 1457, 945, 1969, 113, 1137, 625, 1649, 369, 1393, 881, 1905, 241, 1265, 753, 1777, 497, 1521, 1009, 2033, 9, 1033, 521, 1545, 265, 1289, 777, 1801, 137, 1161, 649, 1673, 393, 1417, 905, 1929, 73, 1097, 585, 1609, 329, 1353, 841, 1865, 
201, 1225, 713, 1737, 457, 1481, 969, 1993, 41, 1065, 553, 1577, 297, 1321, 809, 1833, 169, 1193, 681, 1705, 425, 1449, 937, 1961, 105, 1129, 617, 1641, 361, 1385, 873, 1897, 233, 1257, 745, 1769, 489, 1513, 1001, 2025, 25, 1049, 537, 1561, 281, 1305, 793, 1817, 153, 1177, 665, 1689, 409, 1433, 921, 1945, 89, 1113, 601, 1625, 345, 1369, 857, 1881, 217, 1241, 729, 1753, 473, 1497, 985, 2009, 57, 1081, 569, 1593, 313, 1337, 825, 1849, 185, 1209, 697, 1721, 441, 1465, 953, 1977, 121, 1145, 633, 1657, 377, 1401, 889, 1913, 249, 1273, 761, 1785, 505, 1529, 1017, 2041, 5, 1029, 517, 1541, 261, 1285, 773, 1797, 133, 1157, 645, 1669, 389, 1413, 901, 1925, 69, 1093, 581, 1605, 325, 1349, 837, 1861, 197, 1221, 709, 1733, 453, 1477, 965, 1989, 37, 1061, 549, 1573, 293, 1317, 805, 1829, 165, 1189, 677, 1701, 421, 1445, 933, 1957, 101, 1125, 613, 1637, 357, 1381, 869, 1893, 229, 1253, 741, 1765, 485, 1509, 997, 2021, 21, 1045, 533, 1557, 277, 1301, 789, 1813, 149, 1173, 661, 1685, 405, 1429, 917, 1941, 85, 1109, 597, 1621, 341, 1365, 853, 1877, 213, 1237, 725, 1749, 469, 1493, 981, 2005, 53, 1077, 565, 1589, 309, 1333, 821, 1845, 181, 1205, 693, 1717, 437, 1461, 949, 1973, 117, 1141, 629, 1653, 373, 1397, 885, 1909, 245, 1269, 757, 1781, 501, 1525, 1013, 2037, 13, 1037, 525, 1549, 269, 1293, 781, 1805, 141, 1165, 653, 1677, 397, 1421, 909, 1933, 77, 1101, 589, 1613, 333, 1357, 845, 1869, 205, 1229, 717, 1741, 461, 1485, 973, 1997, 45, 1069, 557, 1581, 301, 1325, 813, 1837, 173, 1197, 685, 1709, 429, 1453, 941, 1965, 109, 1133, 621, 1645, 365, 1389, 877, 1901, 237, 1261, 749, 1773, 493, 1517, 1005, 2029, 29, 1053, 541, 1565, 285, 1309, 797, 1821, 157, 1181, 669, 1693, 413, 1437, 925, 1949, 93, 1117, 605, 1629, 349, 1373, 861, 1885, 221, 1245, 733, 1757, 477, 1501, 989, 2013, 61, 1085, 573, 1597, 317, 1341, 829, 1853, 189, 1213, 701, 1725, 445, 1469, 957, 1981, 125, 1149, 637, 1661, 381, 1405, 893, 1917, 253, 1277, 765, 1789, 509, 1533, 1021, 2045, 3, 1027, 515, 1539, 259, 1283, 771, 1795, 131, 1155, 643, 1667, 387, 1411, 899, 1923, 67, 1091, 579, 1603, 323, 1347, 835, 1859, 195, 1219, 707, 1731, 451, 1475, 963, 1987, 35, 1059, 547, 1571, 291, 1315, 803, 1827, 163, 1187, 675, 1699, 419, 1443, 931, 1955, 99, 1123, 611, 1635, 355, 1379, 867, 1891, 227, 1251, 739, 1763, 483, 1507, 995, 2019, 19, 1043, 531, 1555, 275, 1299, 787, 1811, 147, 1171, 659, 1683, 403, 1427, 915, 1939, 83, 1107, 595, 1619, 339, 1363, 851, 1875, 211, 1235, 723, 1747, 467, 1491, 979, 2003, 51, 1075, 563, 1587, 307, 1331, 819, 1843, 179, 1203, 691, 1715, 435, 1459, 947, 1971, 115, 1139, 627, 1651, 371, 1395, 883, 1907, 243, 1267, 755, 1779, 499, 1523, 1011, 2035, 11, 1035, 523, 1547, 267, 1291, 779, 1803, 139, 1163, 651, 1675, 395, 1419, 907, 1931, 75, 1099, 587, 1611, 331, 1355, 843, 1867, 203, 1227, 715, 1739, 459, 1483, 971, 1995, 43, 1067, 555, 1579, 299, 1323, 811, 1835, 171, 1195, 683, 1707, 427, 1451, 939, 1963, 107, 1131, 619, 1643, 363, 1387, 875, 1899, 235, 1259, 747, 1771, 491, 1515, 1003, 2027, 27, 1051, 539, 1563, 283, 1307, 795, 1819, 155, 1179, 667, 1691, 411, 1435, 923, 1947, 91, 1115, 603, 1627, 347, 1371, 859, 1883, 219, 1243, 731, 1755, 475, 1499, 987, 2011, 59, 1083, 571, 1595, 315, 1339, 827, 1851, 187, 1211, 699, 1723, 443, 1467, 955, 1979, 123, 1147, 635, 1659, 379, 1403, 891, 1915, 251, 1275, 763, 1787, 507, 1531, 1019, 2043, 7, 1031, 519, 1543, 263, 1287, 775, 1799, 135, 1159, 647, 1671, 391, 1415, 903, 1927, 71, 1095, 583, 1607, 327, 1351, 839, 1863, 199, 1223, 711, 1735, 455, 1479, 967, 1991, 39, 1063, 551, 
1575, 295, 1319, 807, 1831, 167, 1191, 679, 1703, 423, 1447, 935, 1959, 103, 1127, 615, 1639, 359, 1383, 871, 1895, 231, 1255, 743, 1767, 487, 1511, 999, 2023, 23, 1047, 535, 1559, 279, 1303, 791, 1815, 151, 1175, 663, 1687, 407, 1431, 919, 1943, 87, 1111, 599, 1623, 343, 1367, 855, 1879, 215, 1239, 727, 1751, 471, 1495, 983, 2007, 55, 1079, 567, 1591, 311, 1335, 823, 1847, 183, 1207, 695, 1719, 439, 1463, 951, 1975, 119, 1143, 631, 1655, 375, 1399, 887, 1911, 247, 1271, 759, 1783, 503, 1527, 1015, 2039, 15, 1039, 527, 1551, 271, 1295, 783, 1807, 143, 1167, 655, 1679, 399, 1423, 911, 1935, 79, 1103, 591, 1615, 335, 1359, 847, 1871, 207, 1231, 719, 1743, 463, 1487, 975, 1999, 47, 1071, 559, 1583, 303, 1327, 815, 1839, 175, 1199, 687, 1711, 431, 1455, 943, 1967, 111, 1135, 623, 1647, 367, 1391, 879, 1903, 239, 1263, 751, 1775, 495, 1519, 1007, 2031, 31, 1055, 543, 1567, 287, 1311, 799, 1823, 159, 1183, 671, 1695, 415, 1439, 927, 1951, 95, 1119, 607, 1631, 351, 1375, 863, 1887, 223, 1247, 735, 1759, 479, 1503, 991, 2015, 63, 1087, 575, 1599, 319, 1343, 831, 1855, 191, 1215, 703, 1727, 447, 1471, 959, 1983, 127, 1151, 639, 1663, 383, 1407, 895, 1919, 255, 1279, 767, 1791, 511, 1535, 1023, 2047}; int get_indices4096[] = {0, 2048, 1024, 3072, 512, 2560, 1536, 3584, 256, 2304, 1280, 3328, 768, 2816, 1792, 3840, 128, 2176, 1152, 3200, 640, 2688, 1664, 3712, 384, 2432, 1408, 3456, 896, 2944, 1920, 3968, 64, 2112, 1088, 3136, 576, 2624, 1600, 3648, 320, 2368, 1344, 3392, 832, 2880, 1856, 3904, 192, 2240, 1216, 3264, 704, 2752, 1728, 3776, 448, 2496, 1472, 3520, 960, 3008, 1984, 4032, 32, 2080, 1056, 3104, 544, 2592, 1568, 3616, 288, 2336, 1312, 3360, 800, 2848, 1824, 3872, 160, 2208, 1184, 3232, 672, 2720, 1696, 3744, 416, 2464, 1440, 3488, 928, 2976, 1952, 4000, 96, 2144, 1120, 3168, 608, 2656, 1632, 3680, 352, 2400, 1376, 3424, 864, 2912, 1888, 3936, 224, 2272, 1248, 3296, 736, 2784, 1760, 3808, 480, 2528, 1504, 3552, 992, 3040, 2016, 4064, 16, 2064, 1040, 3088, 528, 2576, 1552, 3600, 272, 2320, 1296, 3344, 784, 2832, 1808, 3856, 144, 2192, 1168, 3216, 656, 2704, 1680, 3728, 400, 2448, 1424, 3472, 912, 2960, 1936, 3984, 80, 2128, 1104, 3152, 592, 2640, 1616, 3664, 336, 2384, 1360, 3408, 848, 2896, 1872, 3920, 208, 2256, 1232, 3280, 720, 2768, 1744, 3792, 464, 2512, 1488, 3536, 976, 3024, 2000, 4048, 48, 2096, 1072, 3120, 560, 2608, 1584, 3632, 304, 2352, 1328, 3376, 816, 2864, 1840, 3888, 176, 2224, 1200, 3248, 688, 2736, 1712, 3760, 432, 2480, 1456, 3504, 944, 2992, 1968, 4016, 112, 2160, 1136, 3184, 624, 2672, 1648, 3696, 368, 2416, 1392, 3440, 880, 2928, 1904, 3952, 240, 2288, 1264, 3312, 752, 2800, 1776, 3824, 496, 2544, 1520, 3568, 1008, 3056, 2032, 4080, 8, 2056, 1032, 3080, 520, 2568, 1544, 3592, 264, 2312, 1288, 3336, 776, 2824, 1800, 3848, 136, 2184, 1160, 3208, 648, 2696, 1672, 3720, 392, 2440, 1416, 3464, 904, 2952, 1928, 3976, 72, 2120, 1096, 3144, 584, 2632, 1608, 3656, 328, 2376, 1352, 3400, 840, 2888, 1864, 3912, 200, 2248, 1224, 3272, 712, 2760, 1736, 3784, 456, 2504, 1480, 3528, 968, 3016, 1992, 4040, 40, 2088, 1064, 3112, 552, 2600, 1576, 3624, 296, 2344, 1320, 3368, 808, 2856, 1832, 3880, 168, 2216, 1192, 3240, 680, 2728, 1704, 3752, 424, 2472, 1448, 3496, 936, 2984, 1960, 4008, 104, 2152, 1128, 3176, 616, 2664, 1640, 3688, 360, 2408, 1384, 3432, 872, 2920, 1896, 3944, 232, 2280, 1256, 3304, 744, 2792, 1768, 3816, 488, 2536, 1512, 3560, 1000, 3048, 2024, 4072, 24, 2072, 1048, 3096, 536, 2584, 1560, 3608, 280, 2328, 1304, 3352, 792, 2840, 1816, 3864, 152, 2200, 1176, 3224, 664, 
2712, 1688, 3736, 408, 2456, 1432, 3480, 920, 2968, 1944, 3992, 88, 2136, 1112, 3160, 600, 2648, 1624, 3672, 344, 2392, 1368, 3416, 856, 2904, 1880, 3928, 216, 2264, 1240, 3288, 728, 2776, 1752, 3800, 472, 2520, 1496, 3544, 984, 3032, 2008, 4056, 56, 2104, 1080, 3128, 568, 2616, 1592, 3640, 312, 2360, 1336, 3384, 824, 2872, 1848, 3896, 184, 2232, 1208, 3256, 696, 2744, 1720, 3768, 440, 2488, 1464, 3512, 952, 3000, 1976, 4024, 120, 2168, 1144, 3192, 632, 2680, 1656, 3704, 376, 2424, 1400, 3448, 888, 2936, 1912, 3960, 248, 2296, 1272, 3320, 760, 2808, 1784, 3832, 504, 2552, 1528, 3576, 1016, 3064, 2040, 4088, 4, 2052, 1028, 3076, 516, 2564, 1540, 3588, 260, 2308, 1284, 3332, 772, 2820, 1796, 3844, 132, 2180, 1156, 3204, 644, 2692, 1668, 3716, 388, 2436, 1412, 3460, 900, 2948, 1924, 3972, 68, 2116, 1092, 3140, 580, 2628, 1604, 3652, 324, 2372, 1348, 3396, 836, 2884, 1860, 3908, 196, 2244, 1220, 3268, 708, 2756, 1732, 3780, 452, 2500, 1476, 3524, 964, 3012, 1988, 4036, 36, 2084, 1060, 3108, 548, 2596, 1572, 3620, 292, 2340, 1316, 3364, 804, 2852, 1828, 3876, 164, 2212, 1188, 3236, 676, 2724, 1700, 3748, 420, 2468, 1444, 3492, 932, 2980, 1956, 4004, 100, 2148, 1124, 3172, 612, 2660, 1636, 3684, 356, 2404, 1380, 3428, 868, 2916, 1892, 3940, 228, 2276, 1252, 3300, 740, 2788, 1764, 3812, 484, 2532, 1508, 3556, 996, 3044, 2020, 4068, 20, 2068, 1044, 3092, 532, 2580, 1556, 3604, 276, 2324, 1300, 3348, 788, 2836, 1812, 3860, 148, 2196, 1172, 3220, 660, 2708, 1684, 3732, 404, 2452, 1428, 3476, 916, 2964, 1940, 3988, 84, 2132, 1108, 3156, 596, 2644, 1620, 3668, 340, 2388, 1364, 3412, 852, 2900, 1876, 3924, 212, 2260, 1236, 3284, 724, 2772, 1748, 3796, 468, 2516, 1492, 3540, 980, 3028, 2004, 4052, 52, 2100, 1076, 3124, 564, 2612, 1588, 3636, 308, 2356, 1332, 3380, 820, 2868, 1844, 3892, 180, 2228, 1204, 3252, 692, 2740, 1716, 3764, 436, 2484, 1460, 3508, 948, 2996, 1972, 4020, 116, 2164, 1140, 3188, 628, 2676, 1652, 3700, 372, 2420, 1396, 3444, 884, 2932, 1908, 3956, 244, 2292, 1268, 3316, 756, 2804, 1780, 3828, 500, 2548, 1524, 3572, 1012, 3060, 2036, 4084, 12, 2060, 1036, 3084, 524, 2572, 1548, 3596, 268, 2316, 1292, 3340, 780, 2828, 1804, 3852, 140, 2188, 1164, 3212, 652, 2700, 1676, 3724, 396, 2444, 1420, 3468, 908, 2956, 1932, 3980, 76, 2124, 1100, 3148, 588, 2636, 1612, 3660, 332, 2380, 1356, 3404, 844, 2892, 1868, 3916, 204, 2252, 1228, 3276, 716, 2764, 1740, 3788, 460, 2508, 1484, 3532, 972, 3020, 1996, 4044, 44, 2092, 1068, 3116, 556, 2604, 1580, 3628, 300, 2348, 1324, 3372, 812, 2860, 1836, 3884, 172, 2220, 1196, 3244, 684, 2732, 1708, 3756, 428, 2476, 1452, 3500, 940, 2988, 1964, 4012, 108, 2156, 1132, 3180, 620, 2668, 1644, 3692, 364, 2412, 1388, 3436, 876, 2924, 1900, 3948, 236, 2284, 1260, 3308, 748, 2796, 1772, 3820, 492, 2540, 1516, 3564, 1004, 3052, 2028, 4076, 28, 2076, 1052, 3100, 540, 2588, 1564, 3612, 284, 2332, 1308, 3356, 796, 2844, 1820, 3868, 156, 2204, 1180, 3228, 668, 2716, 1692, 3740, 412, 2460, 1436, 3484, 924, 2972, 1948, 3996, 92, 2140, 1116, 3164, 604, 2652, 1628, 3676, 348, 2396, 1372, 3420, 860, 2908, 1884, 3932, 220, 2268, 1244, 3292, 732, 2780, 1756, 3804, 476, 2524, 1500, 3548, 988, 3036, 2012, 4060, 60, 2108, 1084, 3132, 572, 2620, 1596, 3644, 316, 2364, 1340, 3388, 828, 2876, 1852, 3900, 188, 2236, 1212, 3260, 700, 2748, 1724, 3772, 444, 2492, 1468, 3516, 956, 3004, 1980, 4028, 124, 2172, 1148, 3196, 636, 2684, 1660, 3708, 380, 2428, 1404, 3452, 892, 2940, 1916, 3964, 252, 2300, 1276, 3324, 764, 2812, 1788, 3836, 508, 2556, 1532, 3580, 1020, 3068, 2044, 4092, 2, 
2050, 1026, 3074, 514, 2562, 1538, 3586, 258, 2306, 1282, 3330, 770, 2818, 1794, 3842, 130, 2178, 1154, 3202, 642, 2690, 1666, 3714, 386, 2434, 1410, 3458, 898, 2946, 1922, 3970, 66, 2114, 1090, 3138, 578, 2626, 1602, 3650, 322, 2370, 1346, 3394, 834, 2882, 1858, 3906, 194, 2242, 1218, 3266, 706, 2754, 1730, 3778, 450, 2498, 1474, 3522, 962, 3010, 1986, 4034, 34, 2082, 1058, 3106, 546, 2594, 1570, 3618, 290, 2338, 1314, 3362, 802, 2850, 1826, 3874, 162, 2210, 1186, 3234, 674, 2722, 1698, 3746, 418, 2466, 1442, 3490, 930, 2978, 1954, 4002, 98, 2146, 1122, 3170, 610, 2658, 1634, 3682, 354, 2402, 1378, 3426, 866, 2914, 1890, 3938, 226, 2274, 1250, 3298, 738, 2786, 1762, 3810, 482, 2530, 1506, 3554, 994, 3042, 2018, 4066, 18, 2066, 1042, 3090, 530, 2578, 1554, 3602, 274, 2322, 1298, 3346, 786, 2834, 1810, 3858, 146, 2194, 1170, 3218, 658, 2706, 1682, 3730, 402, 2450, 1426, 3474, 914, 2962, 1938, 3986, 82, 2130, 1106, 3154, 594, 2642, 1618, 3666, 338, 2386, 1362, 3410, 850, 2898, 1874, 3922, 210, 2258, 1234, 3282, 722, 2770, 1746, 3794, 466, 2514, 1490, 3538, 978, 3026, 2002, 4050, 50, 2098, 1074, 3122, 562, 2610, 1586, 3634, 306, 2354, 1330, 3378, 818, 2866, 1842, 3890, 178, 2226, 1202, 3250, 690, 2738, 1714, 3762, 434, 2482, 1458, 3506, 946, 2994, 1970, 4018, 114, 2162, 1138, 3186, 626, 2674, 1650, 3698, 370, 2418, 1394, 3442, 882, 2930, 1906, 3954, 242, 2290, 1266, 3314, 754, 2802, 1778, 3826, 498, 2546, 1522, 3570, 1010, 3058, 2034, 4082, 10, 2058, 1034, 3082, 522, 2570, 1546, 3594, 266, 2314, 1290, 3338, 778, 2826, 1802, 3850, 138, 2186, 1162, 3210, 650, 2698, 1674, 3722, 394, 2442, 1418, 3466, 906, 2954, 1930, 3978, 74, 2122, 1098, 3146, 586, 2634, 1610, 3658, 330, 2378, 1354, 3402, 842, 2890, 1866, 3914, 202, 2250, 1226, 3274, 714, 2762, 1738, 3786, 458, 2506, 1482, 3530, 970, 3018, 1994, 4042, 42, 2090, 1066, 3114, 554, 2602, 1578, 3626, 298, 2346, 1322, 3370, 810, 2858, 1834, 3882, 170, 2218, 1194, 3242, 682, 2730, 1706, 3754, 426, 2474, 1450, 3498, 938, 2986, 1962, 4010, 106, 2154, 1130, 3178, 618, 2666, 1642, 3690, 362, 2410, 1386, 3434, 874, 2922, 1898, 3946, 234, 2282, 1258, 3306, 746, 2794, 1770, 3818, 490, 2538, 1514, 3562, 1002, 3050, 2026, 4074, 26, 2074, 1050, 3098, 538, 2586, 1562, 3610, 282, 2330, 1306, 3354, 794, 2842, 1818, 3866, 154, 2202, 1178, 3226, 666, 2714, 1690, 3738, 410, 2458, 1434, 3482, 922, 2970, 1946, 3994, 90, 2138, 1114, 3162, 602, 2650, 1626, 3674, 346, 2394, 1370, 3418, 858, 2906, 1882, 3930, 218, 2266, 1242, 3290, 730, 2778, 1754, 3802, 474, 2522, 1498, 3546, 986, 3034, 2010, 4058, 58, 2106, 1082, 3130, 570, 2618, 1594, 3642, 314, 2362, 1338, 3386, 826, 2874, 1850, 3898, 186, 2234, 1210, 3258, 698, 2746, 1722, 3770, 442, 2490, 1466, 3514, 954, 3002, 1978, 4026, 122, 2170, 1146, 3194, 634, 2682, 1658, 3706, 378, 2426, 1402, 3450, 890, 2938, 1914, 3962, 250, 2298, 1274, 3322, 762, 2810, 1786, 3834, 506, 2554, 1530, 3578, 1018, 3066, 2042, 4090, 6, 2054, 1030, 3078, 518, 2566, 1542, 3590, 262, 2310, 1286, 3334, 774, 2822, 1798, 3846, 134, 2182, 1158, 3206, 646, 2694, 1670, 3718, 390, 2438, 1414, 3462, 902, 2950, 1926, 3974, 70, 2118, 1094, 3142, 582, 2630, 1606, 3654, 326, 2374, 1350, 3398, 838, 2886, 1862, 3910, 198, 2246, 1222, 3270, 710, 2758, 1734, 3782, 454, 2502, 1478, 3526, 966, 3014, 1990, 4038, 38, 2086, 1062, 3110, 550, 2598, 1574, 3622, 294, 2342, 1318, 3366, 806, 2854, 1830, 3878, 166, 2214, 1190, 3238, 678, 2726, 1702, 3750, 422, 2470, 1446, 3494, 934, 2982, 1958, 4006, 102, 2150, 1126, 3174, 614, 2662, 1638, 3686, 358, 2406, 1382, 3430, 870, 
2918, 1894, 3942, 230, 2278, 1254, 3302, 742, 2790, 1766, 3814, 486, 2534, 1510, 3558, 998, 3046, 2022, 4070, 22, 2070, 1046, 3094, 534, 2582, 1558, 3606, 278, 2326, 1302, 3350, 790, 2838, 1814, 3862, 150, 2198, 1174, 3222, 662, 2710, 1686, 3734, 406, 2454, 1430, 3478, 918, 2966, 1942, 3990, 86, 2134, 1110, 3158, 598, 2646, 1622, 3670, 342, 2390, 1366, 3414, 854, 2902, 1878, 3926, 214, 2262, 1238, 3286, 726, 2774, 1750, 3798, 470, 2518, 1494, 3542, 982, 3030, 2006, 4054, 54, 2102, 1078, 3126, 566, 2614, 1590, 3638, 310, 2358, 1334, 3382, 822, 2870, 1846, 3894, 182, 2230, 1206, 3254, 694, 2742, 1718, 3766, 438, 2486, 1462, 3510, 950, 2998, 1974, 4022, 118, 2166, 1142, 3190, 630, 2678, 1654, 3702, 374, 2422, 1398, 3446, 886, 2934, 1910, 3958, 246, 2294, 1270, 3318, 758, 2806, 1782, 3830, 502, 2550, 1526, 3574, 1014, 3062, 2038, 4086, 14, 2062, 1038, 3086, 526, 2574, 1550, 3598, 270, 2318, 1294, 3342, 782, 2830, 1806, 3854, 142, 2190, 1166, 3214, 654, 2702, 1678, 3726, 398, 2446, 1422, 3470, 910, 2958, 1934, 3982, 78, 2126, 1102, 3150, 590, 2638, 1614, 3662, 334, 2382, 1358, 3406, 846, 2894, 1870, 3918, 206, 2254, 1230, 3278, 718, 2766, 1742, 3790, 462, 2510, 1486, 3534, 974, 3022, 1998, 4046, 46, 2094, 1070, 3118, 558, 2606, 1582, 3630, 302, 2350, 1326, 3374, 814, 2862, 1838, 3886, 174, 2222, 1198, 3246, 686, 2734, 1710, 3758, 430, 2478, 1454, 3502, 942, 2990, 1966, 4014, 110, 2158, 1134, 3182, 622, 2670, 1646, 3694, 366, 2414, 1390, 3438, 878, 2926, 1902, 3950, 238, 2286, 1262, 3310, 750, 2798, 1774, 3822, 494, 2542, 1518, 3566, 1006, 3054, 2030, 4078, 30, 2078, 1054, 3102, 542, 2590, 1566, 3614, 286, 2334, 1310, 3358, 798, 2846, 1822, 3870, 158, 2206, 1182, 3230, 670, 2718, 1694, 3742, 414, 2462, 1438, 3486, 926, 2974, 1950, 3998, 94, 2142, 1118, 3166, 606, 2654, 1630, 3678, 350, 2398, 1374, 3422, 862, 2910, 1886, 3934, 222, 2270, 1246, 3294, 734, 2782, 1758, 3806, 478, 2526, 1502, 3550, 990, 3038, 2014, 4062, 62, 2110, 1086, 3134, 574, 2622, 1598, 3646, 318, 2366, 1342, 3390, 830, 2878, 1854, 3902, 190, 2238, 1214, 3262, 702, 2750, 1726, 3774, 446, 2494, 1470, 3518, 958, 3006, 1982, 4030, 126, 2174, 1150, 3198, 638, 2686, 1662, 3710, 382, 2430, 1406, 3454, 894, 2942, 1918, 3966, 254, 2302, 1278, 3326, 766, 2814, 1790, 3838, 510, 2558, 1534, 3582, 1022, 3070, 2046, 4094, 1, 2049, 1025, 3073, 513, 2561, 1537, 3585, 257, 2305, 1281, 3329, 769, 2817, 1793, 3841, 129, 2177, 1153, 3201, 641, 2689, 1665, 3713, 385, 2433, 1409, 3457, 897, 2945, 1921, 3969, 65, 2113, 1089, 3137, 577, 2625, 1601, 3649, 321, 2369, 1345, 3393, 833, 2881, 1857, 3905, 193, 2241, 1217, 3265, 705, 2753, 1729, 3777, 449, 2497, 1473, 3521, 961, 3009, 1985, 4033, 33, 2081, 1057, 3105, 545, 2593, 1569, 3617, 289, 2337, 1313, 3361, 801, 2849, 1825, 3873, 161, 2209, 1185, 3233, 673, 2721, 1697, 3745, 417, 2465, 1441, 3489, 929, 2977, 1953, 4001, 97, 2145, 1121, 3169, 609, 2657, 1633, 3681, 353, 2401, 1377, 3425, 865, 2913, 1889, 3937, 225, 2273, 1249, 3297, 737, 2785, 1761, 3809, 481, 2529, 1505, 3553, 993, 3041, 2017, 4065, 17, 2065, 1041, 3089, 529, 2577, 1553, 3601, 273, 2321, 1297, 3345, 785, 2833, 1809, 3857, 145, 2193, 1169, 3217, 657, 2705, 1681, 3729, 401, 2449, 1425, 3473, 913, 2961, 1937, 3985, 81, 2129, 1105, 3153, 593, 2641, 1617, 3665, 337, 2385, 1361, 3409, 849, 2897, 1873, 3921, 209, 2257, 1233, 3281, 721, 2769, 1745, 3793, 465, 2513, 1489, 3537, 977, 3025, 2001, 4049, 49, 2097, 1073, 3121, 561, 2609, 1585, 3633, 305, 2353, 1329, 3377, 817, 2865, 1841, 3889, 177, 2225, 1201, 3249, 689, 2737, 1713, 3761, 433, 
2481, 1457, 3505, 945, 2993, 1969, 4017, 113, 2161, 1137, 3185, 625, 2673, 1649, 3697, 369, 2417, 1393, 3441, 881, 2929, 1905, 3953, 241, 2289, 1265, 3313, 753, 2801, 1777, 3825, 497, 2545, 1521, 3569, 1009, 3057, 2033, 4081, 9, 2057, 1033, 3081, 521, 2569, 1545, 3593, 265, 2313, 1289, 3337, 777, 2825, 1801, 3849, 137, 2185, 1161, 3209, 649, 2697, 1673, 3721, 393, 2441, 1417, 3465, 905, 2953, 1929, 3977, 73, 2121, 1097, 3145, 585, 2633, 1609, 3657, 329, 2377, 1353, 3401, 841, 2889, 1865, 3913, 201, 2249, 1225, 3273, 713, 2761, 1737, 3785, 457, 2505, 1481, 3529, 969, 3017, 1993, 4041, 41, 2089, 1065, 3113, 553, 2601, 1577, 3625, 297, 2345, 1321, 3369, 809, 2857, 1833, 3881, 169, 2217, 1193, 3241, 681, 2729, 1705, 3753, 425, 2473, 1449, 3497, 937, 2985, 1961, 4009, 105, 2153, 1129, 3177, 617, 2665, 1641, 3689, 361, 2409, 1385, 3433, 873, 2921, 1897, 3945, 233, 2281, 1257, 3305, 745, 2793, 1769, 3817, 489, 2537, 1513, 3561, 1001, 3049, 2025, 4073, 25, 2073, 1049, 3097, 537, 2585, 1561, 3609, 281, 2329, 1305, 3353, 793, 2841, 1817, 3865, 153, 2201, 1177, 3225, 665, 2713, 1689, 3737, 409, 2457, 1433, 3481, 921, 2969, 1945, 3993, 89, 2137, 1113, 3161, 601, 2649, 1625, 3673, 345, 2393, 1369, 3417, 857, 2905, 1881, 3929, 217, 2265, 1241, 3289, 729, 2777, 1753, 3801, 473, 2521, 1497, 3545, 985, 3033, 2009, 4057, 57, 2105, 1081, 3129, 569, 2617, 1593, 3641, 313, 2361, 1337, 3385, 825, 2873, 1849, 3897, 185, 2233, 1209, 3257, 697, 2745, 1721, 3769, 441, 2489, 1465, 3513, 953, 3001, 1977, 4025, 121, 2169, 1145, 3193, 633, 2681, 1657, 3705, 377, 2425, 1401, 3449, 889, 2937, 1913, 3961, 249, 2297, 1273, 3321, 761, 2809, 1785, 3833, 505, 2553, 1529, 3577, 1017, 3065, 2041, 4089, 5, 2053, 1029, 3077, 517, 2565, 1541, 3589, 261, 2309, 1285, 3333, 773, 2821, 1797, 3845, 133, 2181, 1157, 3205, 645, 2693, 1669, 3717, 389, 2437, 1413, 3461, 901, 2949, 1925, 3973, 69, 2117, 1093, 3141, 581, 2629, 1605, 3653, 325, 2373, 1349, 3397, 837, 2885, 1861, 3909, 197, 2245, 1221, 3269, 709, 2757, 1733, 3781, 453, 2501, 1477, 3525, 965, 3013, 1989, 4037, 37, 2085, 1061, 3109, 549, 2597, 1573, 3621, 293, 2341, 1317, 3365, 805, 2853, 1829, 3877, 165, 2213, 1189, 3237, 677, 2725, 1701, 3749, 421, 2469, 1445, 3493, 933, 2981, 1957, 4005, 101, 2149, 1125, 3173, 613, 2661, 1637, 3685, 357, 2405, 1381, 3429, 869, 2917, 1893, 3941, 229, 2277, 1253, 3301, 741, 2789, 1765, 3813, 485, 2533, 1509, 3557, 997, 3045, 2021, 4069, 21, 2069, 1045, 3093, 533, 2581, 1557, 3605, 277, 2325, 1301, 3349, 789, 2837, 1813, 3861, 149, 2197, 1173, 3221, 661, 2709, 1685, 3733, 405, 2453, 1429, 3477, 917, 2965, 1941, 3989, 85, 2133, 1109, 3157, 597, 2645, 1621, 3669, 341, 2389, 1365, 3413, 853, 2901, 1877, 3925, 213, 2261, 1237, 3285, 725, 2773, 1749, 3797, 469, 2517, 1493, 3541, 981, 3029, 2005, 4053, 53, 2101, 1077, 3125, 565, 2613, 1589, 3637, 309, 2357, 1333, 3381, 821, 2869, 1845, 3893, 181, 2229, 1205, 3253, 693, 2741, 1717, 3765, 437, 2485, 1461, 3509, 949, 2997, 1973, 4021, 117, 2165, 1141, 3189, 629, 2677, 1653, 3701, 373, 2421, 1397, 3445, 885, 2933, 1909, 3957, 245, 2293, 1269, 3317, 757, 2805, 1781, 3829, 501, 2549, 1525, 3573, 1013, 3061, 2037, 4085, 13, 2061, 1037, 3085, 525, 2573, 1549, 3597, 269, 2317, 1293, 3341, 781, 2829, 1805, 3853, 141, 2189, 1165, 3213, 653, 2701, 1677, 3725, 397, 2445, 1421, 3469, 909, 2957, 1933, 3981, 77, 2125, 1101, 3149, 589, 2637, 1613, 3661, 333, 2381, 1357, 3405, 845, 2893, 1869, 3917, 205, 2253, 1229, 3277, 717, 2765, 1741, 3789, 461, 2509, 1485, 3533, 973, 3021, 1997, 4045, 45, 2093, 1069, 3117, 557, 
2605, 1581, 3629, 301, 2349, 1325, 3373, 813, 2861, 1837, 3885, 173, 2221, 1197, 3245, 685, 2733, 1709, 3757, 429, 2477, 1453, 3501, 941, 2989, 1965, 4013, 109, 2157, 1133, 3181, 621, 2669, 1645, 3693, 365, 2413, 1389, 3437, 877, 2925, 1901, 3949, 237, 2285, 1261, 3309, 749, 2797, 1773, 3821, 493, 2541, 1517, 3565, 1005, 3053, 2029, 4077, 29, 2077, 1053, 3101, 541, 2589, 1565, 3613, 285, 2333, 1309, 3357, 797, 2845, 1821, 3869, 157, 2205, 1181, 3229, 669, 2717, 1693, 3741, 413, 2461, 1437, 3485, 925, 2973, 1949, 3997, 93, 2141, 1117, 3165, 605, 2653, 1629, 3677, 349, 2397, 1373, 3421, 861, 2909, 1885, 3933, 221, 2269, 1245, 3293, 733, 2781, 1757, 3805, 477, 2525, 1501, 3549, 989, 3037, 2013, 4061, 61, 2109, 1085, 3133, 573, 2621, 1597, 3645, 317, 2365, 1341, 3389, 829, 2877, 1853, 3901, 189, 2237, 1213, 3261, 701, 2749, 1725, 3773, 445, 2493, 1469, 3517, 957, 3005, 1981, 4029, 125, 2173, 1149, 3197, 637, 2685, 1661, 3709, 381, 2429, 1405, 3453, 893, 2941, 1917, 3965, 253, 2301, 1277, 3325, 765, 2813, 1789, 3837, 509, 2557, 1533, 3581, 1021, 3069, 2045, 4093, 3, 2051, 1027, 3075, 515, 2563, 1539, 3587, 259, 2307, 1283, 3331, 771, 2819, 1795, 3843, 131, 2179, 1155, 3203, 643, 2691, 1667, 3715, 387, 2435, 1411, 3459, 899, 2947, 1923, 3971, 67, 2115, 1091, 3139, 579, 2627, 1603, 3651, 323, 2371, 1347, 3395, 835, 2883, 1859, 3907, 195, 2243, 1219, 3267, 707, 2755, 1731, 3779, 451, 2499, 1475, 3523, 963, 3011, 1987, 4035, 35, 2083, 1059, 3107, 547, 2595, 1571, 3619, 291, 2339, 1315, 3363, 803, 2851, 1827, 3875, 163, 2211, 1187, 3235, 675, 2723, 1699, 3747, 419, 2467, 1443, 3491, 931, 2979, 1955, 4003, 99, 2147, 1123, 3171, 611, 2659, 1635, 3683, 355, 2403, 1379, 3427, 867, 2915, 1891, 3939, 227, 2275, 1251, 3299, 739, 2787, 1763, 3811, 483, 2531, 1507, 3555, 995, 3043, 2019, 4067, 19, 2067, 1043, 3091, 531, 2579, 1555, 3603, 275, 2323, 1299, 3347, 787, 2835, 1811, 3859, 147, 2195, 1171, 3219, 659, 2707, 1683, 3731, 403, 2451, 1427, 3475, 915, 2963, 1939, 3987, 83, 2131, 1107, 3155, 595, 2643, 1619, 3667, 339, 2387, 1363, 3411, 851, 2899, 1875, 3923, 211, 2259, 1235, 3283, 723, 2771, 1747, 3795, 467, 2515, 1491, 3539, 979, 3027, 2003, 4051, 51, 2099, 1075, 3123, 563, 2611, 1587, 3635, 307, 2355, 1331, 3379, 819, 2867, 1843, 3891, 179, 2227, 1203, 3251, 691, 2739, 1715, 3763, 435, 2483, 1459, 3507, 947, 2995, 1971, 4019, 115, 2163, 1139, 3187, 627, 2675, 1651, 3699, 371, 2419, 1395, 3443, 883, 2931, 1907, 3955, 243, 2291, 1267, 3315, 755, 2803, 1779, 3827, 499, 2547, 1523, 3571, 1011, 3059, 2035, 4083, 11, 2059, 1035, 3083, 523, 2571, 1547, 3595, 267, 2315, 1291, 3339, 779, 2827, 1803, 3851, 139, 2187, 1163, 3211, 651, 2699, 1675, 3723, 395, 2443, 1419, 3467, 907, 2955, 1931, 3979, 75, 2123, 1099, 3147, 587, 2635, 1611, 3659, 331, 2379, 1355, 3403, 843, 2891, 1867, 3915, 203, 2251, 1227, 3275, 715, 2763, 1739, 3787, 459, 2507, 1483, 3531, 971, 3019, 1995, 4043, 43, 2091, 1067, 3115, 555, 2603, 1579, 3627, 299, 2347, 1323, 3371, 811, 2859, 1835, 3883, 171, 2219, 1195, 3243, 683, 2731, 1707, 3755, 427, 2475, 1451, 3499, 939, 2987, 1963, 4011, 107, 2155, 1131, 3179, 619, 2667, 1643, 3691, 363, 2411, 1387, 3435, 875, 2923, 1899, 3947, 235, 2283, 1259, 3307, 747, 2795, 1771, 3819, 491, 2539, 1515, 3563, 1003, 3051, 2027, 4075, 27, 2075, 1051, 3099, 539, 2587, 1563, 3611, 283, 2331, 1307, 3355, 795, 2843, 1819, 3867, 155, 2203, 1179, 3227, 667, 2715, 1691, 3739, 411, 2459, 1435, 3483, 923, 2971, 1947, 3995, 91, 2139, 1115, 3163, 603, 2651, 1627, 3675, 347, 2395, 1371, 3419, 859, 2907, 1883, 3931, 219, 
2267, 1243, 3291, 731, 2779, 1755, 3803, 475, 2523, 1499, 3547, 987, 3035, 2011, 4059, 59, 2107, 1083, 3131, 571, 2619, 1595, 3643, 315, 2363, 1339, 3387, 827, 2875, 1851, 3899, 187, 2235, 1211, 3259, 699, 2747, 1723, 3771, 443, 2491, 1467, 3515, 955, 3003, 1979, 4027, 123, 2171, 1147, 3195, 635, 2683, 1659, 3707, 379, 2427, 1403, 3451, 891, 2939, 1915, 3963, 251, 2299, 1275, 3323, 763, 2811, 1787, 3835, 507, 2555, 1531, 3579, 1019, 3067, 2043, 4091, 7, 2055, 1031, 3079, 519, 2567, 1543, 3591, 263, 2311, 1287, 3335, 775, 2823, 1799, 3847, 135, 2183, 1159, 3207, 647, 2695, 1671, 3719, 391, 2439, 1415, 3463, 903, 2951, 1927, 3975, 71, 2119, 1095, 3143, 583, 2631, 1607, 3655, 327, 2375, 1351, 3399, 839, 2887, 1863, 3911, 199, 2247, 1223, 3271, 711, 2759, 1735, 3783, 455, 2503, 1479, 3527, 967, 3015, 1991, 4039, 39, 2087, 1063, 3111, 551, 2599, 1575, 3623, 295, 2343, 1319, 3367, 807, 2855, 1831, 3879, 167, 2215, 1191, 3239, 679, 2727, 1703, 3751, 423, 2471, 1447, 3495, 935, 2983, 1959, 4007, 103, 2151, 1127, 3175, 615, 2663, 1639, 3687, 359, 2407, 1383, 3431, 871, 2919, 1895, 3943, 231, 2279, 1255, 3303, 743, 2791, 1767, 3815, 487, 2535, 1511, 3559, 999, 3047, 2023, 4071, 23, 2071, 1047, 3095, 535, 2583, 1559, 3607, 279, 2327, 1303, 3351, 791, 2839, 1815, 3863, 151, 2199, 1175, 3223, 663, 2711, 1687, 3735, 407, 2455, 1431, 3479, 919, 2967, 1943, 3991, 87, 2135, 1111, 3159, 599, 2647, 1623, 3671, 343, 2391, 1367, 3415, 855, 2903, 1879, 3927, 215, 2263, 1239, 3287, 727, 2775, 1751, 3799, 471, 2519, 1495, 3543, 983, 3031, 2007, 4055, 55, 2103, 1079, 3127, 567, 2615, 1591, 3639, 311, 2359, 1335, 3383, 823, 2871, 1847, 3895, 183, 2231, 1207, 3255, 695, 2743, 1719, 3767, 439, 2487, 1463, 3511, 951, 2999, 1975, 4023, 119, 2167, 1143, 3191, 631, 2679, 1655, 3703, 375, 2423, 1399, 3447, 887, 2935, 1911, 3959, 247, 2295, 1271, 3319, 759, 2807, 1783, 3831, 503, 2551, 1527, 3575, 1015, 3063, 2039, 4087, 15, 2063, 1039, 3087, 527, 2575, 1551, 3599, 271, 2319, 1295, 3343, 783, 2831, 1807, 3855, 143, 2191, 1167, 3215, 655, 2703, 1679, 3727, 399, 2447, 1423, 3471, 911, 2959, 1935, 3983, 79, 2127, 1103, 3151, 591, 2639, 1615, 3663, 335, 2383, 1359, 3407, 847, 2895, 1871, 3919, 207, 2255, 1231, 3279, 719, 2767, 1743, 3791, 463, 2511, 1487, 3535, 975, 3023, 1999, 4047, 47, 2095, 1071, 3119, 559, 2607, 1583, 3631, 303, 2351, 1327, 3375, 815, 2863, 1839, 3887, 175, 2223, 1199, 3247, 687, 2735, 1711, 3759, 431, 2479, 1455, 3503, 943, 2991, 1967, 4015, 111, 2159, 1135, 3183, 623, 2671, 1647, 3695, 367, 2415, 1391, 3439, 879, 2927, 1903, 3951, 239, 2287, 1263, 3311, 751, 2799, 1775, 3823, 495, 2543, 1519, 3567, 1007, 3055, 2031, 4079, 31, 2079, 1055, 3103, 543, 2591, 1567, 3615, 287, 2335, 1311, 3359, 799, 2847, 1823, 3871, 159, 2207, 1183, 3231, 671, 2719, 1695, 3743, 415, 2463, 1439, 3487, 927, 2975, 1951, 3999, 95, 2143, 1119, 3167, 607, 2655, 1631, 3679, 351, 2399, 1375, 3423, 863, 2911, 1887, 3935, 223, 2271, 1247, 3295, 735, 2783, 1759, 3807, 479, 2527, 1503, 3551, 991, 3039, 2015, 4063, 63, 2111, 1087, 3135, 575, 2623, 1599, 3647, 319, 2367, 1343, 3391, 831, 2879, 1855, 3903, 191, 2239, 1215, 3263, 703, 2751, 1727, 3775, 447, 2495, 1471, 3519, 959, 3007, 1983, 4031, 127, 2175, 1151, 3199, 639, 2687, 1663, 3711, 383, 2431, 1407, 3455, 895, 2943, 1919, 3967, 255, 2303, 1279, 3327, 767, 2815, 1791, 3839, 511, 2559, 1535, 3583, 1023, 3071, 2047, 4095}; int *get_indices_gpu; hipMalloc((void**)&get_indices_gpu, n * sizeof(int)); switch(n){ case 1 : hipMemcpy(get_indices_gpu, get_indices1, n * sizeof(int), 
hipMemcpyHostToDevice); break; case 2 : hipMemcpy(get_indices_gpu, get_indices2, n * sizeof(int), hipMemcpyHostToDevice); break; case 4 : hipMemcpy(get_indices_gpu, get_indices4, n * sizeof(int), hipMemcpyHostToDevice); break; case 8 : hipMemcpy(get_indices_gpu, get_indices8, n * sizeof(int), hipMemcpyHostToDevice); break; case 16 : hipMemcpy(get_indices_gpu, get_indices16, n * sizeof(int), hipMemcpyHostToDevice); break; case 32 : hipMemcpy(get_indices_gpu, get_indices32, n * sizeof(int), hipMemcpyHostToDevice); break; case 64 : hipMemcpy(get_indices_gpu, get_indices64, n * sizeof(int), hipMemcpyHostToDevice); break; case 128 : hipMemcpy(get_indices_gpu, get_indices128, n * sizeof(int), hipMemcpyHostToDevice); break; case 256 : hipMemcpy(get_indices_gpu, get_indices256, n * sizeof(int), hipMemcpyHostToDevice); break; case 512 : hipMemcpy(get_indices_gpu, get_indices512, n * sizeof(int), hipMemcpyHostToDevice); break; case 1024 : hipMemcpy(get_indices_gpu, get_indices1024, n * sizeof(int), hipMemcpyHostToDevice); break; case 2048 : hipMemcpy(get_indices_gpu, get_indices2048, n * sizeof(int), hipMemcpyHostToDevice); break; case 4096 : hipMemcpy(get_indices_gpu, get_indices4096, n * sizeof(int), hipMemcpyHostToDevice); break; } uint64_t*result; hipMalloc((void**)&result, size); // set a place to save the result if (n<=1024) { hipLaunchKernelGGL(( bit_reverse_gpu), dim3(batch), dim3(n), 0, 0, vec, result, get_indices_gpu, n, batch); } else { hipLaunchKernelGGL(( bit_reverse_gpu), dim3(n*batch/1024), dim3(1024), 0, 0, vec, result, get_indices_gpu, n, batch); } hipFree(get_indices_gpu); return result; }
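bit_reverse_table above carries thirteen hard-coded permutation tables, one for every power of two up to 4096, but the same values can be generated at run time from n alone. The sketch below is a hypothetical replacement helper (not part of the file above): it builds the bit-reversal permutation of i over log2(n) bits, and the resulting vector could be copied to the device with hipMemcpy and handed to bit_reverse_gpu exactly like get_indices_gpu is.

#include <cstdint>
#include <vector>

// Bit-reversal permutation for a power-of-two n: index i maps to the value
// obtained by reversing the low log2(n) bits of i. For n = 8 this yields
// {0, 4, 2, 6, 1, 5, 3, 7}, identical to get_indices8 above.
std::vector<int> make_bit_reverse_indices(uint64_t n) {
  int logn = 0;
  while ((1ull << logn) < n) ++logn;
  std::vector<int> idx(n);
  for (uint64_t i = 0; i < n; ++i) {
    uint64_t r = 0;
    for (int b = 0; b < logn; ++b)
      if (i & (1ull << b)) r |= 1ull << (logn - 1 - b);
    idx[i] = (int)r;
  }
  return idx;
}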
0cd7ce719092c8897736bca84cf4984a2bbbf216.cu
#include <cmath> /* pow() */ #include <cstdint> /* uint64_t */ #include <ctime> /* time() */ #include <cstdlib> #include <unistd.h> #include <iostream> using namespace std; #include <ctime> /* time() */ #include <sys/time.h> #include <stdlib.h> #include <iostream> #include <cstdint> /* int64_t, uint64_t */ void printVec(uint64_t *vec, uint64_t n){ std::cout << "["; for(uint64_t i = 0; i < n; i++){ std::cout << vec[i] << ","; } std::cout << "]" << std::endl; } __global__ void bit_reverse_gpu(uint64_t *vec, uint64_t *result, int *indices, uint64_t n, uint64_t batch){ int batch_id = blockIdx.x; // one block (with n threads) handles one vector if possible int j = threadIdx.x; int blockdim = blockDim.x; if(blockDim.x == n){ // one block (with n threads) handles one vector // we have #batch blocks // eg. n=16, batch=4 <=> there're 4 blocks, blockDim = 16 result[ batch_id*blockdim + indices[j] ] = vec[ batch_id*blockdim + j]; } else if(blockDim.x < n){ int k = n / (blockDim.x); // eg: n=2048 while blockDim=1024, so 2 blocks handle one vector (vec seperated into 2 parts) int vec_part = blockIdx.x % k; result[ (batch_id/k)*n + indices[vec_part*blockdim + j] ] = vec[ batch_id*blockdim + j]; } } __host__ uint64_t * bit_reverse_table(uint64_t *vec, uint64_t n, uint64_t batch){ int size = n*batch * sizeof(uint64_t); int get_indices1[] = {0}; int get_indices2[] = {0, 1}; int get_indices4[] = {0, 2, 1, 3}; int get_indices8[] = {0, 4, 2, 6, 1, 5, 3, 7}; int get_indices16[] = {0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15}; int get_indices32[] = {0, 16, 8, 24, 4, 20, 12, 28, 2, 18, 10, 26, 6, 22, 14, 30, 1, 17, 9, 25, 5, 21, 13, 29, 3, 19, 11, 27, 7, 23, 15, 31}; int get_indices64[] = {0, 32, 16, 48, 8, 40, 24, 56, 4, 36, 20, 52, 12, 44, 28, 60, 2, 34, 18, 50, 10, 42, 26, 58, 6, 38, 22, 54, 14, 46, 30, 62, 1, 33, 17, 49, 9, 41, 25, 57, 5, 37, 21, 53, 13, 45, 29, 61, 3, 35, 19, 51, 11, 43, 27, 59, 7, 39, 23, 55, 15, 47, 31, 63}; int get_indices128[] = {0, 64, 32, 96, 16, 80, 48, 112, 8, 72, 40, 104, 24, 88, 56, 120, 4, 68, 36, 100, 20, 84, 52, 116, 12, 76, 44, 108, 28, 92, 60, 124, 2, 66, 34, 98, 18, 82, 50, 114, 10, 74, 42, 106, 26, 90, 58, 122, 6, 70, 38, 102, 22, 86, 54, 118, 14, 78, 46, 110, 30, 94, 62, 126, 1, 65, 33, 97, 17, 81, 49, 113, 9, 73, 41, 105, 25, 89, 57, 121, 5, 69, 37, 101, 21, 85, 53, 117, 13, 77, 45, 109, 29, 93, 61, 125, 3, 67, 35, 99, 19, 83, 51, 115, 11, 75, 43, 107, 27, 91, 59, 123, 7, 71, 39, 103, 23, 87, 55, 119, 15, 79, 47, 111, 31, 95, 63, 127}; int get_indices256[] = {0, 128, 64, 192, 32, 160, 96, 224, 16, 144, 80, 208, 48, 176, 112, 240, 8, 136, 72, 200, 40, 168, 104, 232, 24, 152, 88, 216, 56, 184, 120, 248, 4, 132, 68, 196, 36, 164, 100, 228, 20, 148, 84, 212, 52, 180, 116, 244, 12, 140, 76, 204, 44, 172, 108, 236, 28, 156, 92, 220, 60, 188, 124, 252, 2, 130, 66, 194, 34, 162, 98, 226, 18, 146, 82, 210, 50, 178, 114, 242, 10, 138, 74, 202, 42, 170, 106, 234, 26, 154, 90, 218, 58, 186, 122, 250, 6, 134, 70, 198, 38, 166, 102, 230, 22, 150, 86, 214, 54, 182, 118, 246, 14, 142, 78, 206, 46, 174, 110, 238, 30, 158, 94, 222, 62, 190, 126, 254, 1, 129, 65, 193, 33, 161, 97, 225, 17, 145, 81, 209, 49, 177, 113, 241, 9, 137, 73, 201, 41, 169, 105, 233, 25, 153, 89, 217, 57, 185, 121, 249, 5, 133, 69, 197, 37, 165, 101, 229, 21, 149, 85, 213, 53, 181, 117, 245, 13, 141, 77, 205, 45, 173, 109, 237, 29, 157, 93, 221, 61, 189, 125, 253, 3, 131, 67, 195, 35, 163, 99, 227, 19, 147, 83, 211, 51, 179, 115, 243, 11, 139, 75, 203, 43, 171, 107, 235, 27, 155, 91, 219, 59, 187, 123, 251, 
7, 135, 71, 199, 39, 167, 103, 231, 23, 151, 87, 215, 55, 183, 119, 247, 15, 143, 79, 207, 47, 175, 111, 239, 31, 159, 95, 223, 63, 191, 127, 255}; int get_indices512[] = {0, 256, 128, 384, 64, 320, 192, 448, 32, 288, 160, 416, 96, 352, 224, 480, 16, 272, 144, 400, 80, 336, 208, 464, 48, 304, 176, 432, 112, 368, 240, 496, 8, 264, 136, 392, 72, 328, 200, 456, 40, 296, 168, 424, 104, 360, 232, 488, 24, 280, 152, 408, 88, 344, 216, 472, 56, 312, 184, 440, 120, 376, 248, 504, 4, 260, 132, 388, 68, 324, 196, 452, 36, 292, 164, 420, 100, 356, 228, 484, 20, 276, 148, 404, 84, 340, 212, 468, 52, 308, 180, 436, 116, 372, 244, 500, 12, 268, 140, 396, 76, 332, 204, 460, 44, 300, 172, 428, 108, 364, 236, 492, 28, 284, 156, 412, 92, 348, 220, 476, 60, 316, 188, 444, 124, 380, 252, 508, 2, 258, 130, 386, 66, 322, 194, 450, 34, 290, 162, 418, 98, 354, 226, 482, 18, 274, 146, 402, 82, 338, 210, 466, 50, 306, 178, 434, 114, 370, 242, 498, 10, 266, 138, 394, 74, 330, 202, 458, 42, 298, 170, 426, 106, 362, 234, 490, 26, 282, 154, 410, 90, 346, 218, 474, 58, 314, 186, 442, 122, 378, 250, 506, 6, 262, 134, 390, 70, 326, 198, 454, 38, 294, 166, 422, 102, 358, 230, 486, 22, 278, 150, 406, 86, 342, 214, 470, 54, 310, 182, 438, 118, 374, 246, 502, 14, 270, 142, 398, 78, 334, 206, 462, 46, 302, 174, 430, 110, 366, 238, 494, 30, 286, 158, 414, 94, 350, 222, 478, 62, 318, 190, 446, 126, 382, 254, 510, 1, 257, 129, 385, 65, 321, 193, 449, 33, 289, 161, 417, 97, 353, 225, 481, 17, 273, 145, 401, 81, 337, 209, 465, 49, 305, 177, 433, 113, 369, 241, 497, 9, 265, 137, 393, 73, 329, 201, 457, 41, 297, 169, 425, 105, 361, 233, 489, 25, 281, 153, 409, 89, 345, 217, 473, 57, 313, 185, 441, 121, 377, 249, 505, 5, 261, 133, 389, 69, 325, 197, 453, 37, 293, 165, 421, 101, 357, 229, 485, 21, 277, 149, 405, 85, 341, 213, 469, 53, 309, 181, 437, 117, 373, 245, 501, 13, 269, 141, 397, 77, 333, 205, 461, 45, 301, 173, 429, 109, 365, 237, 493, 29, 285, 157, 413, 93, 349, 221, 477, 61, 317, 189, 445, 125, 381, 253, 509, 3, 259, 131, 387, 67, 323, 195, 451, 35, 291, 163, 419, 99, 355, 227, 483, 19, 275, 147, 403, 83, 339, 211, 467, 51, 307, 179, 435, 115, 371, 243, 499, 11, 267, 139, 395, 75, 331, 203, 459, 43, 299, 171, 427, 107, 363, 235, 491, 27, 283, 155, 411, 91, 347, 219, 475, 59, 315, 187, 443, 123, 379, 251, 507, 7, 263, 135, 391, 71, 327, 199, 455, 39, 295, 167, 423, 103, 359, 231, 487, 23, 279, 151, 407, 87, 343, 215, 471, 55, 311, 183, 439, 119, 375, 247, 503, 15, 271, 143, 399, 79, 335, 207, 463, 47, 303, 175, 431, 111, 367, 239, 495, 31, 287, 159, 415, 95, 351, 223, 479, 63, 319, 191, 447, 127, 383, 255, 511}; int get_indices1024[] = {0, 512, 256, 768, 128, 640, 384, 896, 64, 576, 320, 832, 192, 704, 448, 960, 32, 544, 288, 800, 160, 672, 416, 928, 96, 608, 352, 864, 224, 736, 480, 992, 16, 528, 272, 784, 144, 656, 400, 912, 80, 592, 336, 848, 208, 720, 464, 976, 48, 560, 304, 816, 176, 688, 432, 944, 112, 624, 368, 880, 240, 752, 496, 1008, 8, 520, 264, 776, 136, 648, 392, 904, 72, 584, 328, 840, 200, 712, 456, 968, 40, 552, 296, 808, 168, 680, 424, 936, 104, 616, 360, 872, 232, 744, 488, 1000, 24, 536, 280, 792, 152, 664, 408, 920, 88, 600, 344, 856, 216, 728, 472, 984, 56, 568, 312, 824, 184, 696, 440, 952, 120, 632, 376, 888, 248, 760, 504, 1016, 4, 516, 260, 772, 132, 644, 388, 900, 68, 580, 324, 836, 196, 708, 452, 964, 36, 548, 292, 804, 164, 676, 420, 932, 100, 612, 356, 868, 228, 740, 484, 996, 20, 532, 276, 788, 148, 660, 404, 916, 84, 596, 340, 852, 212, 724, 468, 980, 52, 564, 308, 820, 180, 692, 436, 948, 116, 
628, 372, 884, 244, 756, 500, 1012, 12, 524, 268, 780, 140, 652, 396, 908, 76, 588, 332, 844, 204, 716, 460, 972, 44, 556, 300, 812, 172, 684, 428, 940, 108, 620, 364, 876, 236, 748, 492, 1004, 28, 540, 284, 796, 156, 668, 412, 924, 92, 604, 348, 860, 220, 732, 476, 988, 60, 572, 316, 828, 188, 700, 444, 956, 124, 636, 380, 892, 252, 764, 508, 1020, 2, 514, 258, 770, 130, 642, 386, 898, 66, 578, 322, 834, 194, 706, 450, 962, 34, 546, 290, 802, 162, 674, 418, 930, 98, 610, 354, 866, 226, 738, 482, 994, 18, 530, 274, 786, 146, 658, 402, 914, 82, 594, 338, 850, 210, 722, 466, 978, 50, 562, 306, 818, 178, 690, 434, 946, 114, 626, 370, 882, 242, 754, 498, 1010, 10, 522, 266, 778, 138, 650, 394, 906, 74, 586, 330, 842, 202, 714, 458, 970, 42, 554, 298, 810, 170, 682, 426, 938, 106, 618, 362, 874, 234, 746, 490, 1002, 26, 538, 282, 794, 154, 666, 410, 922, 90, 602, 346, 858, 218, 730, 474, 986, 58, 570, 314, 826, 186, 698, 442, 954, 122, 634, 378, 890, 250, 762, 506, 1018, 6, 518, 262, 774, 134, 646, 390, 902, 70, 582, 326, 838, 198, 710, 454, 966, 38, 550, 294, 806, 166, 678, 422, 934, 102, 614, 358, 870, 230, 742, 486, 998, 22, 534, 278, 790, 150, 662, 406, 918, 86, 598, 342, 854, 214, 726, 470, 982, 54, 566, 310, 822, 182, 694, 438, 950, 118, 630, 374, 886, 246, 758, 502, 1014, 14, 526, 270, 782, 142, 654, 398, 910, 78, 590, 334, 846, 206, 718, 462, 974, 46, 558, 302, 814, 174, 686, 430, 942, 110, 622, 366, 878, 238, 750, 494, 1006, 30, 542, 286, 798, 158, 670, 414, 926, 94, 606, 350, 862, 222, 734, 478, 990, 62, 574, 318, 830, 190, 702, 446, 958, 126, 638, 382, 894, 254, 766, 510, 1022, 1, 513, 257, 769, 129, 641, 385, 897, 65, 577, 321, 833, 193, 705, 449, 961, 33, 545, 289, 801, 161, 673, 417, 929, 97, 609, 353, 865, 225, 737, 481, 993, 17, 529, 273, 785, 145, 657, 401, 913, 81, 593, 337, 849, 209, 721, 465, 977, 49, 561, 305, 817, 177, 689, 433, 945, 113, 625, 369, 881, 241, 753, 497, 1009, 9, 521, 265, 777, 137, 649, 393, 905, 73, 585, 329, 841, 201, 713, 457, 969, 41, 553, 297, 809, 169, 681, 425, 937, 105, 617, 361, 873, 233, 745, 489, 1001, 25, 537, 281, 793, 153, 665, 409, 921, 89, 601, 345, 857, 217, 729, 473, 985, 57, 569, 313, 825, 185, 697, 441, 953, 121, 633, 377, 889, 249, 761, 505, 1017, 5, 517, 261, 773, 133, 645, 389, 901, 69, 581, 325, 837, 197, 709, 453, 965, 37, 549, 293, 805, 165, 677, 421, 933, 101, 613, 357, 869, 229, 741, 485, 997, 21, 533, 277, 789, 149, 661, 405, 917, 85, 597, 341, 853, 213, 725, 469, 981, 53, 565, 309, 821, 181, 693, 437, 949, 117, 629, 373, 885, 245, 757, 501, 1013, 13, 525, 269, 781, 141, 653, 397, 909, 77, 589, 333, 845, 205, 717, 461, 973, 45, 557, 301, 813, 173, 685, 429, 941, 109, 621, 365, 877, 237, 749, 493, 1005, 29, 541, 285, 797, 157, 669, 413, 925, 93, 605, 349, 861, 221, 733, 477, 989, 61, 573, 317, 829, 189, 701, 445, 957, 125, 637, 381, 893, 253, 765, 509, 1021, 3, 515, 259, 771, 131, 643, 387, 899, 67, 579, 323, 835, 195, 707, 451, 963, 35, 547, 291, 803, 163, 675, 419, 931, 99, 611, 355, 867, 227, 739, 483, 995, 19, 531, 275, 787, 147, 659, 403, 915, 83, 595, 339, 851, 211, 723, 467, 979, 51, 563, 307, 819, 179, 691, 435, 947, 115, 627, 371, 883, 243, 755, 499, 1011, 11, 523, 267, 779, 139, 651, 395, 907, 75, 587, 331, 843, 203, 715, 459, 971, 43, 555, 299, 811, 171, 683, 427, 939, 107, 619, 363, 875, 235, 747, 491, 1003, 27, 539, 283, 795, 155, 667, 411, 923, 91, 603, 347, 859, 219, 731, 475, 987, 59, 571, 315, 827, 187, 699, 443, 955, 123, 635, 379, 891, 251, 763, 507, 1019, 7, 519, 263, 775, 135, 647, 391, 903, 71, 583, 327, 839, 
199, 711, 455, 967, 39, 551, 295, 807, 167, 679, 423, 935, 103, 615, 359, 871, 231, 743, 487, 999, 23, 535, 279, 791, 151, 663, 407, 919, 87, 599, 343, 855, 215, 727, 471, 983, 55, 567, 311, 823, 183, 695, 439, 951, 119, 631, 375, 887, 247, 759, 503, 1015, 15, 527, 271, 783, 143, 655, 399, 911, 79, 591, 335, 847, 207, 719, 463, 975, 47, 559, 303, 815, 175, 687, 431, 943, 111, 623, 367, 879, 239, 751, 495, 1007, 31, 543, 287, 799, 159, 671, 415, 927, 95, 607, 351, 863, 223, 735, 479, 991, 63, 575, 319, 831, 191, 703, 447, 959, 127, 639, 383, 895, 255, 767, 511, 1023}; int get_indices2048[] = {0, 1024, 512, 1536, 256, 1280, 768, 1792, 128, 1152, 640, 1664, 384, 1408, 896, 1920, 64, 1088, 576, 1600, 320, 1344, 832, 1856, 192, 1216, 704, 1728, 448, 1472, 960, 1984, 32, 1056, 544, 1568, 288, 1312, 800, 1824, 160, 1184, 672, 1696, 416, 1440, 928, 1952, 96, 1120, 608, 1632, 352, 1376, 864, 1888, 224, 1248, 736, 1760, 480, 1504, 992, 2016, 16, 1040, 528, 1552, 272, 1296, 784, 1808, 144, 1168, 656, 1680, 400, 1424, 912, 1936, 80, 1104, 592, 1616, 336, 1360, 848, 1872, 208, 1232, 720, 1744, 464, 1488, 976, 2000, 48, 1072, 560, 1584, 304, 1328, 816, 1840, 176, 1200, 688, 1712, 432, 1456, 944, 1968, 112, 1136, 624, 1648, 368, 1392, 880, 1904, 240, 1264, 752, 1776, 496, 1520, 1008, 2032, 8, 1032, 520, 1544, 264, 1288, 776, 1800, 136, 1160, 648, 1672, 392, 1416, 904, 1928, 72, 1096, 584, 1608, 328, 1352, 840, 1864, 200, 1224, 712, 1736, 456, 1480, 968, 1992, 40, 1064, 552, 1576, 296, 1320, 808, 1832, 168, 1192, 680, 1704, 424, 1448, 936, 1960, 104, 1128, 616, 1640, 360, 1384, 872, 1896, 232, 1256, 744, 1768, 488, 1512, 1000, 2024, 24, 1048, 536, 1560, 280, 1304, 792, 1816, 152, 1176, 664, 1688, 408, 1432, 920, 1944, 88, 1112, 600, 1624, 344, 1368, 856, 1880, 216, 1240, 728, 1752, 472, 1496, 984, 2008, 56, 1080, 568, 1592, 312, 1336, 824, 1848, 184, 1208, 696, 1720, 440, 1464, 952, 1976, 120, 1144, 632, 1656, 376, 1400, 888, 1912, 248, 1272, 760, 1784, 504, 1528, 1016, 2040, 4, 1028, 516, 1540, 260, 1284, 772, 1796, 132, 1156, 644, 1668, 388, 1412, 900, 1924, 68, 1092, 580, 1604, 324, 1348, 836, 1860, 196, 1220, 708, 1732, 452, 1476, 964, 1988, 36, 1060, 548, 1572, 292, 1316, 804, 1828, 164, 1188, 676, 1700, 420, 1444, 932, 1956, 100, 1124, 612, 1636, 356, 1380, 868, 1892, 228, 1252, 740, 1764, 484, 1508, 996, 2020, 20, 1044, 532, 1556, 276, 1300, 788, 1812, 148, 1172, 660, 1684, 404, 1428, 916, 1940, 84, 1108, 596, 1620, 340, 1364, 852, 1876, 212, 1236, 724, 1748, 468, 1492, 980, 2004, 52, 1076, 564, 1588, 308, 1332, 820, 1844, 180, 1204, 692, 1716, 436, 1460, 948, 1972, 116, 1140, 628, 1652, 372, 1396, 884, 1908, 244, 1268, 756, 1780, 500, 1524, 1012, 2036, 12, 1036, 524, 1548, 268, 1292, 780, 1804, 140, 1164, 652, 1676, 396, 1420, 908, 1932, 76, 1100, 588, 1612, 332, 1356, 844, 1868, 204, 1228, 716, 1740, 460, 1484, 972, 1996, 44, 1068, 556, 1580, 300, 1324, 812, 1836, 172, 1196, 684, 1708, 428, 1452, 940, 1964, 108, 1132, 620, 1644, 364, 1388, 876, 1900, 236, 1260, 748, 1772, 492, 1516, 1004, 2028, 28, 1052, 540, 1564, 284, 1308, 796, 1820, 156, 1180, 668, 1692, 412, 1436, 924, 1948, 92, 1116, 604, 1628, 348, 1372, 860, 1884, 220, 1244, 732, 1756, 476, 1500, 988, 2012, 60, 1084, 572, 1596, 316, 1340, 828, 1852, 188, 1212, 700, 1724, 444, 1468, 956, 1980, 124, 1148, 636, 1660, 380, 1404, 892, 1916, 252, 1276, 764, 1788, 508, 1532, 1020, 2044, 2, 1026, 514, 1538, 258, 1282, 770, 1794, 130, 1154, 642, 1666, 386, 1410, 898, 1922, 66, 1090, 578, 1602, 322, 1346, 834, 1858, 194, 1218, 706, 1730, 450, 1474, 
962, 1986, 34, 1058, 546, 1570, 290, 1314, 802, 1826, 162, 1186, 674, 1698, 418, 1442, 930, 1954, 98, 1122, 610, 1634, 354, 1378, 866, 1890, 226, 1250, 738, 1762, 482, 1506, 994, 2018, 18, 1042, 530, 1554, 274, 1298, 786, 1810, 146, 1170, 658, 1682, 402, 1426, 914, 1938, 82, 1106, 594, 1618, 338, 1362, 850, 1874, 210, 1234, 722, 1746, 466, 1490, 978, 2002, 50, 1074, 562, 1586, 306, 1330, 818, 1842, 178, 1202, 690, 1714, 434, 1458, 946, 1970, 114, 1138, 626, 1650, 370, 1394, 882, 1906, 242, 1266, 754, 1778, 498, 1522, 1010, 2034, 10, 1034, 522, 1546, 266, 1290, 778, 1802, 138, 1162, 650, 1674, 394, 1418, 906, 1930, 74, 1098, 586, 1610, 330, 1354, 842, 1866, 202, 1226, 714, 1738, 458, 1482, 970, 1994, 42, 1066, 554, 1578, 298, 1322, 810, 1834, 170, 1194, 682, 1706, 426, 1450, 938, 1962, 106, 1130, 618, 1642, 362, 1386, 874, 1898, 234, 1258, 746, 1770, 490, 1514, 1002, 2026, 26, 1050, 538, 1562, 282, 1306, 794, 1818, 154, 1178, 666, 1690, 410, 1434, 922, 1946, 90, 1114, 602, 1626, 346, 1370, 858, 1882, 218, 1242, 730, 1754, 474, 1498, 986, 2010, 58, 1082, 570, 1594, 314, 1338, 826, 1850, 186, 1210, 698, 1722, 442, 1466, 954, 1978, 122, 1146, 634, 1658, 378, 1402, 890, 1914, 250, 1274, 762, 1786, 506, 1530, 1018, 2042, 6, 1030, 518, 1542, 262, 1286, 774, 1798, 134, 1158, 646, 1670, 390, 1414, 902, 1926, 70, 1094, 582, 1606, 326, 1350, 838, 1862, 198, 1222, 710, 1734, 454, 1478, 966, 1990, 38, 1062, 550, 1574, 294, 1318, 806, 1830, 166, 1190, 678, 1702, 422, 1446, 934, 1958, 102, 1126, 614, 1638, 358, 1382, 870, 1894, 230, 1254, 742, 1766, 486, 1510, 998, 2022, 22, 1046, 534, 1558, 278, 1302, 790, 1814, 150, 1174, 662, 1686, 406, 1430, 918, 1942, 86, 1110, 598, 1622, 342, 1366, 854, 1878, 214, 1238, 726, 1750, 470, 1494, 982, 2006, 54, 1078, 566, 1590, 310, 1334, 822, 1846, 182, 1206, 694, 1718, 438, 1462, 950, 1974, 118, 1142, 630, 1654, 374, 1398, 886, 1910, 246, 1270, 758, 1782, 502, 1526, 1014, 2038, 14, 1038, 526, 1550, 270, 1294, 782, 1806, 142, 1166, 654, 1678, 398, 1422, 910, 1934, 78, 1102, 590, 1614, 334, 1358, 846, 1870, 206, 1230, 718, 1742, 462, 1486, 974, 1998, 46, 1070, 558, 1582, 302, 1326, 814, 1838, 174, 1198, 686, 1710, 430, 1454, 942, 1966, 110, 1134, 622, 1646, 366, 1390, 878, 1902, 238, 1262, 750, 1774, 494, 1518, 1006, 2030, 30, 1054, 542, 1566, 286, 1310, 798, 1822, 158, 1182, 670, 1694, 414, 1438, 926, 1950, 94, 1118, 606, 1630, 350, 1374, 862, 1886, 222, 1246, 734, 1758, 478, 1502, 990, 2014, 62, 1086, 574, 1598, 318, 1342, 830, 1854, 190, 1214, 702, 1726, 446, 1470, 958, 1982, 126, 1150, 638, 1662, 382, 1406, 894, 1918, 254, 1278, 766, 1790, 510, 1534, 1022, 2046, 1, 1025, 513, 1537, 257, 1281, 769, 1793, 129, 1153, 641, 1665, 385, 1409, 897, 1921, 65, 1089, 577, 1601, 321, 1345, 833, 1857, 193, 1217, 705, 1729, 449, 1473, 961, 1985, 33, 1057, 545, 1569, 289, 1313, 801, 1825, 161, 1185, 673, 1697, 417, 1441, 929, 1953, 97, 1121, 609, 1633, 353, 1377, 865, 1889, 225, 1249, 737, 1761, 481, 1505, 993, 2017, 17, 1041, 529, 1553, 273, 1297, 785, 1809, 145, 1169, 657, 1681, 401, 1425, 913, 1937, 81, 1105, 593, 1617, 337, 1361, 849, 1873, 209, 1233, 721, 1745, 465, 1489, 977, 2001, 49, 1073, 561, 1585, 305, 1329, 817, 1841, 177, 1201, 689, 1713, 433, 1457, 945, 1969, 113, 1137, 625, 1649, 369, 1393, 881, 1905, 241, 1265, 753, 1777, 497, 1521, 1009, 2033, 9, 1033, 521, 1545, 265, 1289, 777, 1801, 137, 1161, 649, 1673, 393, 1417, 905, 1929, 73, 1097, 585, 1609, 329, 1353, 841, 1865, 201, 1225, 713, 1737, 457, 1481, 969, 1993, 41, 1065, 553, 1577, 297, 1321, 809, 1833, 169, 
1193, 681, 1705, 425, 1449, 937, 1961, 105, 1129, 617, 1641, 361, 1385, 873, 1897, 233, 1257, 745, 1769, 489, 1513, 1001, 2025, 25, 1049, 537, 1561, 281, 1305, 793, 1817, 153, 1177, 665, 1689, 409, 1433, 921, 1945, 89, 1113, 601, 1625, 345, 1369, 857, 1881, 217, 1241, 729, 1753, 473, 1497, 985, 2009, 57, 1081, 569, 1593, 313, 1337, 825, 1849, 185, 1209, 697, 1721, 441, 1465, 953, 1977, 121, 1145, 633, 1657, 377, 1401, 889, 1913, 249, 1273, 761, 1785, 505, 1529, 1017, 2041, 5, 1029, 517, 1541, 261, 1285, 773, 1797, 133, 1157, 645, 1669, 389, 1413, 901, 1925, 69, 1093, 581, 1605, 325, 1349, 837, 1861, 197, 1221, 709, 1733, 453, 1477, 965, 1989, 37, 1061, 549, 1573, 293, 1317, 805, 1829, 165, 1189, 677, 1701, 421, 1445, 933, 1957, 101, 1125, 613, 1637, 357, 1381, 869, 1893, 229, 1253, 741, 1765, 485, 1509, 997, 2021, 21, 1045, 533, 1557, 277, 1301, 789, 1813, 149, 1173, 661, 1685, 405, 1429, 917, 1941, 85, 1109, 597, 1621, 341, 1365, 853, 1877, 213, 1237, 725, 1749, 469, 1493, 981, 2005, 53, 1077, 565, 1589, 309, 1333, 821, 1845, 181, 1205, 693, 1717, 437, 1461, 949, 1973, 117, 1141, 629, 1653, 373, 1397, 885, 1909, 245, 1269, 757, 1781, 501, 1525, 1013, 2037, 13, 1037, 525, 1549, 269, 1293, 781, 1805, 141, 1165, 653, 1677, 397, 1421, 909, 1933, 77, 1101, 589, 1613, 333, 1357, 845, 1869, 205, 1229, 717, 1741, 461, 1485, 973, 1997, 45, 1069, 557, 1581, 301, 1325, 813, 1837, 173, 1197, 685, 1709, 429, 1453, 941, 1965, 109, 1133, 621, 1645, 365, 1389, 877, 1901, 237, 1261, 749, 1773, 493, 1517, 1005, 2029, 29, 1053, 541, 1565, 285, 1309, 797, 1821, 157, 1181, 669, 1693, 413, 1437, 925, 1949, 93, 1117, 605, 1629, 349, 1373, 861, 1885, 221, 1245, 733, 1757, 477, 1501, 989, 2013, 61, 1085, 573, 1597, 317, 1341, 829, 1853, 189, 1213, 701, 1725, 445, 1469, 957, 1981, 125, 1149, 637, 1661, 381, 1405, 893, 1917, 253, 1277, 765, 1789, 509, 1533, 1021, 2045, 3, 1027, 515, 1539, 259, 1283, 771, 1795, 131, 1155, 643, 1667, 387, 1411, 899, 1923, 67, 1091, 579, 1603, 323, 1347, 835, 1859, 195, 1219, 707, 1731, 451, 1475, 963, 1987, 35, 1059, 547, 1571, 291, 1315, 803, 1827, 163, 1187, 675, 1699, 419, 1443, 931, 1955, 99, 1123, 611, 1635, 355, 1379, 867, 1891, 227, 1251, 739, 1763, 483, 1507, 995, 2019, 19, 1043, 531, 1555, 275, 1299, 787, 1811, 147, 1171, 659, 1683, 403, 1427, 915, 1939, 83, 1107, 595, 1619, 339, 1363, 851, 1875, 211, 1235, 723, 1747, 467, 1491, 979, 2003, 51, 1075, 563, 1587, 307, 1331, 819, 1843, 179, 1203, 691, 1715, 435, 1459, 947, 1971, 115, 1139, 627, 1651, 371, 1395, 883, 1907, 243, 1267, 755, 1779, 499, 1523, 1011, 2035, 11, 1035, 523, 1547, 267, 1291, 779, 1803, 139, 1163, 651, 1675, 395, 1419, 907, 1931, 75, 1099, 587, 1611, 331, 1355, 843, 1867, 203, 1227, 715, 1739, 459, 1483, 971, 1995, 43, 1067, 555, 1579, 299, 1323, 811, 1835, 171, 1195, 683, 1707, 427, 1451, 939, 1963, 107, 1131, 619, 1643, 363, 1387, 875, 1899, 235, 1259, 747, 1771, 491, 1515, 1003, 2027, 27, 1051, 539, 1563, 283, 1307, 795, 1819, 155, 1179, 667, 1691, 411, 1435, 923, 1947, 91, 1115, 603, 1627, 347, 1371, 859, 1883, 219, 1243, 731, 1755, 475, 1499, 987, 2011, 59, 1083, 571, 1595, 315, 1339, 827, 1851, 187, 1211, 699, 1723, 443, 1467, 955, 1979, 123, 1147, 635, 1659, 379, 1403, 891, 1915, 251, 1275, 763, 1787, 507, 1531, 1019, 2043, 7, 1031, 519, 1543, 263, 1287, 775, 1799, 135, 1159, 647, 1671, 391, 1415, 903, 1927, 71, 1095, 583, 1607, 327, 1351, 839, 1863, 199, 1223, 711, 1735, 455, 1479, 967, 1991, 39, 1063, 551, 1575, 295, 1319, 807, 1831, 167, 1191, 679, 1703, 423, 1447, 935, 1959, 103, 1127, 615, 1639, 
359, 1383, 871, 1895, 231, 1255, 743, 1767, 487, 1511, 999, 2023, 23, 1047, 535, 1559, 279, 1303, 791, 1815, 151, 1175, 663, 1687, 407, 1431, 919, 1943, 87, 1111, 599, 1623, 343, 1367, 855, 1879, 215, 1239, 727, 1751, 471, 1495, 983, 2007, 55, 1079, 567, 1591, 311, 1335, 823, 1847, 183, 1207, 695, 1719, 439, 1463, 951, 1975, 119, 1143, 631, 1655, 375, 1399, 887, 1911, 247, 1271, 759, 1783, 503, 1527, 1015, 2039, 15, 1039, 527, 1551, 271, 1295, 783, 1807, 143, 1167, 655, 1679, 399, 1423, 911, 1935, 79, 1103, 591, 1615, 335, 1359, 847, 1871, 207, 1231, 719, 1743, 463, 1487, 975, 1999, 47, 1071, 559, 1583, 303, 1327, 815, 1839, 175, 1199, 687, 1711, 431, 1455, 943, 1967, 111, 1135, 623, 1647, 367, 1391, 879, 1903, 239, 1263, 751, 1775, 495, 1519, 1007, 2031, 31, 1055, 543, 1567, 287, 1311, 799, 1823, 159, 1183, 671, 1695, 415, 1439, 927, 1951, 95, 1119, 607, 1631, 351, 1375, 863, 1887, 223, 1247, 735, 1759, 479, 1503, 991, 2015, 63, 1087, 575, 1599, 319, 1343, 831, 1855, 191, 1215, 703, 1727, 447, 1471, 959, 1983, 127, 1151, 639, 1663, 383, 1407, 895, 1919, 255, 1279, 767, 1791, 511, 1535, 1023, 2047}; int get_indices4096[] = {0, 2048, 1024, 3072, 512, 2560, 1536, 3584, 256, 2304, 1280, 3328, 768, 2816, 1792, 3840, 128, 2176, 1152, 3200, 640, 2688, 1664, 3712, 384, 2432, 1408, 3456, 896, 2944, 1920, 3968, 64, 2112, 1088, 3136, 576, 2624, 1600, 3648, 320, 2368, 1344, 3392, 832, 2880, 1856, 3904, 192, 2240, 1216, 3264, 704, 2752, 1728, 3776, 448, 2496, 1472, 3520, 960, 3008, 1984, 4032, 32, 2080, 1056, 3104, 544, 2592, 1568, 3616, 288, 2336, 1312, 3360, 800, 2848, 1824, 3872, 160, 2208, 1184, 3232, 672, 2720, 1696, 3744, 416, 2464, 1440, 3488, 928, 2976, 1952, 4000, 96, 2144, 1120, 3168, 608, 2656, 1632, 3680, 352, 2400, 1376, 3424, 864, 2912, 1888, 3936, 224, 2272, 1248, 3296, 736, 2784, 1760, 3808, 480, 2528, 1504, 3552, 992, 3040, 2016, 4064, 16, 2064, 1040, 3088, 528, 2576, 1552, 3600, 272, 2320, 1296, 3344, 784, 2832, 1808, 3856, 144, 2192, 1168, 3216, 656, 2704, 1680, 3728, 400, 2448, 1424, 3472, 912, 2960, 1936, 3984, 80, 2128, 1104, 3152, 592, 2640, 1616, 3664, 336, 2384, 1360, 3408, 848, 2896, 1872, 3920, 208, 2256, 1232, 3280, 720, 2768, 1744, 3792, 464, 2512, 1488, 3536, 976, 3024, 2000, 4048, 48, 2096, 1072, 3120, 560, 2608, 1584, 3632, 304, 2352, 1328, 3376, 816, 2864, 1840, 3888, 176, 2224, 1200, 3248, 688, 2736, 1712, 3760, 432, 2480, 1456, 3504, 944, 2992, 1968, 4016, 112, 2160, 1136, 3184, 624, 2672, 1648, 3696, 368, 2416, 1392, 3440, 880, 2928, 1904, 3952, 240, 2288, 1264, 3312, 752, 2800, 1776, 3824, 496, 2544, 1520, 3568, 1008, 3056, 2032, 4080, 8, 2056, 1032, 3080, 520, 2568, 1544, 3592, 264, 2312, 1288, 3336, 776, 2824, 1800, 3848, 136, 2184, 1160, 3208, 648, 2696, 1672, 3720, 392, 2440, 1416, 3464, 904, 2952, 1928, 3976, 72, 2120, 1096, 3144, 584, 2632, 1608, 3656, 328, 2376, 1352, 3400, 840, 2888, 1864, 3912, 200, 2248, 1224, 3272, 712, 2760, 1736, 3784, 456, 2504, 1480, 3528, 968, 3016, 1992, 4040, 40, 2088, 1064, 3112, 552, 2600, 1576, 3624, 296, 2344, 1320, 3368, 808, 2856, 1832, 3880, 168, 2216, 1192, 3240, 680, 2728, 1704, 3752, 424, 2472, 1448, 3496, 936, 2984, 1960, 4008, 104, 2152, 1128, 3176, 616, 2664, 1640, 3688, 360, 2408, 1384, 3432, 872, 2920, 1896, 3944, 232, 2280, 1256, 3304, 744, 2792, 1768, 3816, 488, 2536, 1512, 3560, 1000, 3048, 2024, 4072, 24, 2072, 1048, 3096, 536, 2584, 1560, 3608, 280, 2328, 1304, 3352, 792, 2840, 1816, 3864, 152, 2200, 1176, 3224, 664, 2712, 1688, 3736, 408, 2456, 1432, 3480, 920, 2968, 1944, 3992, 88, 2136, 1112, 3160, 600, 
2648, 1624, 3672, 344, 2392, 1368, 3416, 856, 2904, 1880, 3928, 216, 2264, 1240, 3288, 728, 2776, 1752, 3800, 472, 2520, 1496, 3544, 984, 3032, 2008, 4056, 56, 2104, 1080, 3128, 568, 2616, 1592, 3640, 312, 2360, 1336, 3384, 824, 2872, 1848, 3896, 184, 2232, 1208, 3256, 696, 2744, 1720, 3768, 440, 2488, 1464, 3512, 952, 3000, 1976, 4024, 120, 2168, 1144, 3192, 632, 2680, 1656, 3704, 376, 2424, 1400, 3448, 888, 2936, 1912, 3960, 248, 2296, 1272, 3320, 760, 2808, 1784, 3832, 504, 2552, 1528, 3576, 1016, 3064, 2040, 4088, 4, 2052, 1028, 3076, 516, 2564, 1540, 3588, 260, 2308, 1284, 3332, 772, 2820, 1796, 3844, 132, 2180, 1156, 3204, 644, 2692, 1668, 3716, 388, 2436, 1412, 3460, 900, 2948, 1924, 3972, 68, 2116, 1092, 3140, 580, 2628, 1604, 3652, 324, 2372, 1348, 3396, 836, 2884, 1860, 3908, 196, 2244, 1220, 3268, 708, 2756, 1732, 3780, 452, 2500, 1476, 3524, 964, 3012, 1988, 4036, 36, 2084, 1060, 3108, 548, 2596, 1572, 3620, 292, 2340, 1316, 3364, 804, 2852, 1828, 3876, 164, 2212, 1188, 3236, 676, 2724, 1700, 3748, 420, 2468, 1444, 3492, 932, 2980, 1956, 4004, 100, 2148, 1124, 3172, 612, 2660, 1636, 3684, 356, 2404, 1380, 3428, 868, 2916, 1892, 3940, 228, 2276, 1252, 3300, 740, 2788, 1764, 3812, 484, 2532, 1508, 3556, 996, 3044, 2020, 4068, 20, 2068, 1044, 3092, 532, 2580, 1556, 3604, 276, 2324, 1300, 3348, 788, 2836, 1812, 3860, 148, 2196, 1172, 3220, 660, 2708, 1684, 3732, 404, 2452, 1428, 3476, 916, 2964, 1940, 3988, 84, 2132, 1108, 3156, 596, 2644, 1620, 3668, 340, 2388, 1364, 3412, 852, 2900, 1876, 3924, 212, 2260, 1236, 3284, 724, 2772, 1748, 3796, 468, 2516, 1492, 3540, 980, 3028, 2004, 4052, 52, 2100, 1076, 3124, 564, 2612, 1588, 3636, 308, 2356, 1332, 3380, 820, 2868, 1844, 3892, 180, 2228, 1204, 3252, 692, 2740, 1716, 3764, 436, 2484, 1460, 3508, 948, 2996, 1972, 4020, 116, 2164, 1140, 3188, 628, 2676, 1652, 3700, 372, 2420, 1396, 3444, 884, 2932, 1908, 3956, 244, 2292, 1268, 3316, 756, 2804, 1780, 3828, 500, 2548, 1524, 3572, 1012, 3060, 2036, 4084, 12, 2060, 1036, 3084, 524, 2572, 1548, 3596, 268, 2316, 1292, 3340, 780, 2828, 1804, 3852, 140, 2188, 1164, 3212, 652, 2700, 1676, 3724, 396, 2444, 1420, 3468, 908, 2956, 1932, 3980, 76, 2124, 1100, 3148, 588, 2636, 1612, 3660, 332, 2380, 1356, 3404, 844, 2892, 1868, 3916, 204, 2252, 1228, 3276, 716, 2764, 1740, 3788, 460, 2508, 1484, 3532, 972, 3020, 1996, 4044, 44, 2092, 1068, 3116, 556, 2604, 1580, 3628, 300, 2348, 1324, 3372, 812, 2860, 1836, 3884, 172, 2220, 1196, 3244, 684, 2732, 1708, 3756, 428, 2476, 1452, 3500, 940, 2988, 1964, 4012, 108, 2156, 1132, 3180, 620, 2668, 1644, 3692, 364, 2412, 1388, 3436, 876, 2924, 1900, 3948, 236, 2284, 1260, 3308, 748, 2796, 1772, 3820, 492, 2540, 1516, 3564, 1004, 3052, 2028, 4076, 28, 2076, 1052, 3100, 540, 2588, 1564, 3612, 284, 2332, 1308, 3356, 796, 2844, 1820, 3868, 156, 2204, 1180, 3228, 668, 2716, 1692, 3740, 412, 2460, 1436, 3484, 924, 2972, 1948, 3996, 92, 2140, 1116, 3164, 604, 2652, 1628, 3676, 348, 2396, 1372, 3420, 860, 2908, 1884, 3932, 220, 2268, 1244, 3292, 732, 2780, 1756, 3804, 476, 2524, 1500, 3548, 988, 3036, 2012, 4060, 60, 2108, 1084, 3132, 572, 2620, 1596, 3644, 316, 2364, 1340, 3388, 828, 2876, 1852, 3900, 188, 2236, 1212, 3260, 700, 2748, 1724, 3772, 444, 2492, 1468, 3516, 956, 3004, 1980, 4028, 124, 2172, 1148, 3196, 636, 2684, 1660, 3708, 380, 2428, 1404, 3452, 892, 2940, 1916, 3964, 252, 2300, 1276, 3324, 764, 2812, 1788, 3836, 508, 2556, 1532, 3580, 1020, 3068, 2044, 4092, 2, 2050, 1026, 3074, 514, 2562, 1538, 3586, 258, 2306, 1282, 3330, 770, 2818, 1794, 3842, 130, 
2178, 1154, 3202, 642, 2690, 1666, 3714, 386, 2434, 1410, 3458, 898, 2946, 1922, 3970, 66, 2114, 1090, 3138, 578, 2626, 1602, 3650, 322, 2370, 1346, 3394, 834, 2882, 1858, 3906, 194, 2242, 1218, 3266, 706, 2754, 1730, 3778, 450, 2498, 1474, 3522, 962, 3010, 1986, 4034, 34, 2082, 1058, 3106, 546, 2594, 1570, 3618, 290, 2338, 1314, 3362, 802, 2850, 1826, 3874, 162, 2210, 1186, 3234, 674, 2722, 1698, 3746, 418, 2466, 1442, 3490, 930, 2978, 1954, 4002, 98, 2146, 1122, 3170, 610, 2658, 1634, 3682, 354, 2402, 1378, 3426, 866, 2914, 1890, 3938, 226, 2274, 1250, 3298, 738, 2786, 1762, 3810, 482, 2530, 1506, 3554, 994, 3042, 2018, 4066, 18, 2066, 1042, 3090, 530, 2578, 1554, 3602, 274, 2322, 1298, 3346, 786, 2834, 1810, 3858, 146, 2194, 1170, 3218, 658, 2706, 1682, 3730, 402, 2450, 1426, 3474, 914, 2962, 1938, 3986, 82, 2130, 1106, 3154, 594, 2642, 1618, 3666, 338, 2386, 1362, 3410, 850, 2898, 1874, 3922, 210, 2258, 1234, 3282, 722, 2770, 1746, 3794, 466, 2514, 1490, 3538, 978, 3026, 2002, 4050, 50, 2098, 1074, 3122, 562, 2610, 1586, 3634, 306, 2354, 1330, 3378, 818, 2866, 1842, 3890, 178, 2226, 1202, 3250, 690, 2738, 1714, 3762, 434, 2482, 1458, 3506, 946, 2994, 1970, 4018, 114, 2162, 1138, 3186, 626, 2674, 1650, 3698, 370, 2418, 1394, 3442, 882, 2930, 1906, 3954, 242, 2290, 1266, 3314, 754, 2802, 1778, 3826, 498, 2546, 1522, 3570, 1010, 3058, 2034, 4082, 10, 2058, 1034, 3082, 522, 2570, 1546, 3594, 266, 2314, 1290, 3338, 778, 2826, 1802, 3850, 138, 2186, 1162, 3210, 650, 2698, 1674, 3722, 394, 2442, 1418, 3466, 906, 2954, 1930, 3978, 74, 2122, 1098, 3146, 586, 2634, 1610, 3658, 330, 2378, 1354, 3402, 842, 2890, 1866, 3914, 202, 2250, 1226, 3274, 714, 2762, 1738, 3786, 458, 2506, 1482, 3530, 970, 3018, 1994, 4042, 42, 2090, 1066, 3114, 554, 2602, 1578, 3626, 298, 2346, 1322, 3370, 810, 2858, 1834, 3882, 170, 2218, 1194, 3242, 682, 2730, 1706, 3754, 426, 2474, 1450, 3498, 938, 2986, 1962, 4010, 106, 2154, 1130, 3178, 618, 2666, 1642, 3690, 362, 2410, 1386, 3434, 874, 2922, 1898, 3946, 234, 2282, 1258, 3306, 746, 2794, 1770, 3818, 490, 2538, 1514, 3562, 1002, 3050, 2026, 4074, 26, 2074, 1050, 3098, 538, 2586, 1562, 3610, 282, 2330, 1306, 3354, 794, 2842, 1818, 3866, 154, 2202, 1178, 3226, 666, 2714, 1690, 3738, 410, 2458, 1434, 3482, 922, 2970, 1946, 3994, 90, 2138, 1114, 3162, 602, 2650, 1626, 3674, 346, 2394, 1370, 3418, 858, 2906, 1882, 3930, 218, 2266, 1242, 3290, 730, 2778, 1754, 3802, 474, 2522, 1498, 3546, 986, 3034, 2010, 4058, 58, 2106, 1082, 3130, 570, 2618, 1594, 3642, 314, 2362, 1338, 3386, 826, 2874, 1850, 3898, 186, 2234, 1210, 3258, 698, 2746, 1722, 3770, 442, 2490, 1466, 3514, 954, 3002, 1978, 4026, 122, 2170, 1146, 3194, 634, 2682, 1658, 3706, 378, 2426, 1402, 3450, 890, 2938, 1914, 3962, 250, 2298, 1274, 3322, 762, 2810, 1786, 3834, 506, 2554, 1530, 3578, 1018, 3066, 2042, 4090, 6, 2054, 1030, 3078, 518, 2566, 1542, 3590, 262, 2310, 1286, 3334, 774, 2822, 1798, 3846, 134, 2182, 1158, 3206, 646, 2694, 1670, 3718, 390, 2438, 1414, 3462, 902, 2950, 1926, 3974, 70, 2118, 1094, 3142, 582, 2630, 1606, 3654, 326, 2374, 1350, 3398, 838, 2886, 1862, 3910, 198, 2246, 1222, 3270, 710, 2758, 1734, 3782, 454, 2502, 1478, 3526, 966, 3014, 1990, 4038, 38, 2086, 1062, 3110, 550, 2598, 1574, 3622, 294, 2342, 1318, 3366, 806, 2854, 1830, 3878, 166, 2214, 1190, 3238, 678, 2726, 1702, 3750, 422, 2470, 1446, 3494, 934, 2982, 1958, 4006, 102, 2150, 1126, 3174, 614, 2662, 1638, 3686, 358, 2406, 1382, 3430, 870, 2918, 1894, 3942, 230, 2278, 1254, 3302, 742, 2790, 1766, 3814, 486, 2534, 1510, 3558, 998, 
3046, 2022, 4070, 22, 2070, 1046, 3094, 534, 2582, 1558, 3606, 278, 2326, 1302, 3350, 790, 2838, 1814, 3862, 150, 2198, 1174, 3222, 662, 2710, 1686, 3734, 406, 2454, 1430, 3478, 918, 2966, 1942, 3990, 86, 2134, 1110, 3158, 598, 2646, 1622, 3670, 342, 2390, 1366, 3414, 854, 2902, 1878, 3926, 214, 2262, 1238, 3286, 726, 2774, 1750, 3798, 470, 2518, 1494, 3542, 982, 3030, 2006, 4054, 54, 2102, 1078, 3126, 566, 2614, 1590, 3638, 310, 2358, 1334, 3382, 822, 2870, 1846, 3894, 182, 2230, 1206, 3254, 694, 2742, 1718, 3766, 438, 2486, 1462, 3510, 950, 2998, 1974, 4022, 118, 2166, 1142, 3190, 630, 2678, 1654, 3702, 374, 2422, 1398, 3446, 886, 2934, 1910, 3958, 246, 2294, 1270, 3318, 758, 2806, 1782, 3830, 502, 2550, 1526, 3574, 1014, 3062, 2038, 4086, 14, 2062, 1038, 3086, 526, 2574, 1550, 3598, 270, 2318, 1294, 3342, 782, 2830, 1806, 3854, 142, 2190, 1166, 3214, 654, 2702, 1678, 3726, 398, 2446, 1422, 3470, 910, 2958, 1934, 3982, 78, 2126, 1102, 3150, 590, 2638, 1614, 3662, 334, 2382, 1358, 3406, 846, 2894, 1870, 3918, 206, 2254, 1230, 3278, 718, 2766, 1742, 3790, 462, 2510, 1486, 3534, 974, 3022, 1998, 4046, 46, 2094, 1070, 3118, 558, 2606, 1582, 3630, 302, 2350, 1326, 3374, 814, 2862, 1838, 3886, 174, 2222, 1198, 3246, 686, 2734, 1710, 3758, 430, 2478, 1454, 3502, 942, 2990, 1966, 4014, 110, 2158, 1134, 3182, 622, 2670, 1646, 3694, 366, 2414, 1390, 3438, 878, 2926, 1902, 3950, 238, 2286, 1262, 3310, 750, 2798, 1774, 3822, 494, 2542, 1518, 3566, 1006, 3054, 2030, 4078, 30, 2078, 1054, 3102, 542, 2590, 1566, 3614, 286, 2334, 1310, 3358, 798, 2846, 1822, 3870, 158, 2206, 1182, 3230, 670, 2718, 1694, 3742, 414, 2462, 1438, 3486, 926, 2974, 1950, 3998, 94, 2142, 1118, 3166, 606, 2654, 1630, 3678, 350, 2398, 1374, 3422, 862, 2910, 1886, 3934, 222, 2270, 1246, 3294, 734, 2782, 1758, 3806, 478, 2526, 1502, 3550, 990, 3038, 2014, 4062, 62, 2110, 1086, 3134, 574, 2622, 1598, 3646, 318, 2366, 1342, 3390, 830, 2878, 1854, 3902, 190, 2238, 1214, 3262, 702, 2750, 1726, 3774, 446, 2494, 1470, 3518, 958, 3006, 1982, 4030, 126, 2174, 1150, 3198, 638, 2686, 1662, 3710, 382, 2430, 1406, 3454, 894, 2942, 1918, 3966, 254, 2302, 1278, 3326, 766, 2814, 1790, 3838, 510, 2558, 1534, 3582, 1022, 3070, 2046, 4094, 1, 2049, 1025, 3073, 513, 2561, 1537, 3585, 257, 2305, 1281, 3329, 769, 2817, 1793, 3841, 129, 2177, 1153, 3201, 641, 2689, 1665, 3713, 385, 2433, 1409, 3457, 897, 2945, 1921, 3969, 65, 2113, 1089, 3137, 577, 2625, 1601, 3649, 321, 2369, 1345, 3393, 833, 2881, 1857, 3905, 193, 2241, 1217, 3265, 705, 2753, 1729, 3777, 449, 2497, 1473, 3521, 961, 3009, 1985, 4033, 33, 2081, 1057, 3105, 545, 2593, 1569, 3617, 289, 2337, 1313, 3361, 801, 2849, 1825, 3873, 161, 2209, 1185, 3233, 673, 2721, 1697, 3745, 417, 2465, 1441, 3489, 929, 2977, 1953, 4001, 97, 2145, 1121, 3169, 609, 2657, 1633, 3681, 353, 2401, 1377, 3425, 865, 2913, 1889, 3937, 225, 2273, 1249, 3297, 737, 2785, 1761, 3809, 481, 2529, 1505, 3553, 993, 3041, 2017, 4065, 17, 2065, 1041, 3089, 529, 2577, 1553, 3601, 273, 2321, 1297, 3345, 785, 2833, 1809, 3857, 145, 2193, 1169, 3217, 657, 2705, 1681, 3729, 401, 2449, 1425, 3473, 913, 2961, 1937, 3985, 81, 2129, 1105, 3153, 593, 2641, 1617, 3665, 337, 2385, 1361, 3409, 849, 2897, 1873, 3921, 209, 2257, 1233, 3281, 721, 2769, 1745, 3793, 465, 2513, 1489, 3537, 977, 3025, 2001, 4049, 49, 2097, 1073, 3121, 561, 2609, 1585, 3633, 305, 2353, 1329, 3377, 817, 2865, 1841, 3889, 177, 2225, 1201, 3249, 689, 2737, 1713, 3761, 433, 2481, 1457, 3505, 945, 2993, 1969, 4017, 113, 2161, 1137, 3185, 625, 2673, 1649, 3697, 369, 
2417, 1393, 3441, 881, 2929, 1905, 3953, 241, 2289, 1265, 3313, 753, 2801, 1777, 3825, 497, 2545, 1521, 3569, 1009, 3057, 2033, 4081, 9, 2057, 1033, 3081, 521, 2569, 1545, 3593, 265, 2313, 1289, 3337, 777, 2825, 1801, 3849, 137, 2185, 1161, 3209, 649, 2697, 1673, 3721, 393, 2441, 1417, 3465, 905, 2953, 1929, 3977, 73, 2121, 1097, 3145, 585, 2633, 1609, 3657, 329, 2377, 1353, 3401, 841, 2889, 1865, 3913, 201, 2249, 1225, 3273, 713, 2761, 1737, 3785, 457, 2505, 1481, 3529, 969, 3017, 1993, 4041, 41, 2089, 1065, 3113, 553, 2601, 1577, 3625, 297, 2345, 1321, 3369, 809, 2857, 1833, 3881, 169, 2217, 1193, 3241, 681, 2729, 1705, 3753, 425, 2473, 1449, 3497, 937, 2985, 1961, 4009, 105, 2153, 1129, 3177, 617, 2665, 1641, 3689, 361, 2409, 1385, 3433, 873, 2921, 1897, 3945, 233, 2281, 1257, 3305, 745, 2793, 1769, 3817, 489, 2537, 1513, 3561, 1001, 3049, 2025, 4073, 25, 2073, 1049, 3097, 537, 2585, 1561, 3609, 281, 2329, 1305, 3353, 793, 2841, 1817, 3865, 153, 2201, 1177, 3225, 665, 2713, 1689, 3737, 409, 2457, 1433, 3481, 921, 2969, 1945, 3993, 89, 2137, 1113, 3161, 601, 2649, 1625, 3673, 345, 2393, 1369, 3417, 857, 2905, 1881, 3929, 217, 2265, 1241, 3289, 729, 2777, 1753, 3801, 473, 2521, 1497, 3545, 985, 3033, 2009, 4057, 57, 2105, 1081, 3129, 569, 2617, 1593, 3641, 313, 2361, 1337, 3385, 825, 2873, 1849, 3897, 185, 2233, 1209, 3257, 697, 2745, 1721, 3769, 441, 2489, 1465, 3513, 953, 3001, 1977, 4025, 121, 2169, 1145, 3193, 633, 2681, 1657, 3705, 377, 2425, 1401, 3449, 889, 2937, 1913, 3961, 249, 2297, 1273, 3321, 761, 2809, 1785, 3833, 505, 2553, 1529, 3577, 1017, 3065, 2041, 4089, 5, 2053, 1029, 3077, 517, 2565, 1541, 3589, 261, 2309, 1285, 3333, 773, 2821, 1797, 3845, 133, 2181, 1157, 3205, 645, 2693, 1669, 3717, 389, 2437, 1413, 3461, 901, 2949, 1925, 3973, 69, 2117, 1093, 3141, 581, 2629, 1605, 3653, 325, 2373, 1349, 3397, 837, 2885, 1861, 3909, 197, 2245, 1221, 3269, 709, 2757, 1733, 3781, 453, 2501, 1477, 3525, 965, 3013, 1989, 4037, 37, 2085, 1061, 3109, 549, 2597, 1573, 3621, 293, 2341, 1317, 3365, 805, 2853, 1829, 3877, 165, 2213, 1189, 3237, 677, 2725, 1701, 3749, 421, 2469, 1445, 3493, 933, 2981, 1957, 4005, 101, 2149, 1125, 3173, 613, 2661, 1637, 3685, 357, 2405, 1381, 3429, 869, 2917, 1893, 3941, 229, 2277, 1253, 3301, 741, 2789, 1765, 3813, 485, 2533, 1509, 3557, 997, 3045, 2021, 4069, 21, 2069, 1045, 3093, 533, 2581, 1557, 3605, 277, 2325, 1301, 3349, 789, 2837, 1813, 3861, 149, 2197, 1173, 3221, 661, 2709, 1685, 3733, 405, 2453, 1429, 3477, 917, 2965, 1941, 3989, 85, 2133, 1109, 3157, 597, 2645, 1621, 3669, 341, 2389, 1365, 3413, 853, 2901, 1877, 3925, 213, 2261, 1237, 3285, 725, 2773, 1749, 3797, 469, 2517, 1493, 3541, 981, 3029, 2005, 4053, 53, 2101, 1077, 3125, 565, 2613, 1589, 3637, 309, 2357, 1333, 3381, 821, 2869, 1845, 3893, 181, 2229, 1205, 3253, 693, 2741, 1717, 3765, 437, 2485, 1461, 3509, 949, 2997, 1973, 4021, 117, 2165, 1141, 3189, 629, 2677, 1653, 3701, 373, 2421, 1397, 3445, 885, 2933, 1909, 3957, 245, 2293, 1269, 3317, 757, 2805, 1781, 3829, 501, 2549, 1525, 3573, 1013, 3061, 2037, 4085, 13, 2061, 1037, 3085, 525, 2573, 1549, 3597, 269, 2317, 1293, 3341, 781, 2829, 1805, 3853, 141, 2189, 1165, 3213, 653, 2701, 1677, 3725, 397, 2445, 1421, 3469, 909, 2957, 1933, 3981, 77, 2125, 1101, 3149, 589, 2637, 1613, 3661, 333, 2381, 1357, 3405, 845, 2893, 1869, 3917, 205, 2253, 1229, 3277, 717, 2765, 1741, 3789, 461, 2509, 1485, 3533, 973, 3021, 1997, 4045, 45, 2093, 1069, 3117, 557, 2605, 1581, 3629, 301, 2349, 1325, 3373, 813, 2861, 1837, 3885, 173, 2221, 1197, 3245, 685, 
2733, 1709, 3757, 429, 2477, 1453, 3501, 941, 2989, 1965, 4013, 109, 2157, 1133, 3181, 621, 2669, 1645, 3693, 365, 2413, 1389, 3437, 877, 2925, 1901, 3949, 237, 2285, 1261, 3309, 749, 2797, 1773, 3821, 493, 2541, 1517, 3565, 1005, 3053, 2029, 4077, 29, 2077, 1053, 3101, 541, 2589, 1565, 3613, 285, 2333, 1309, 3357, 797, 2845, 1821, 3869, 157, 2205, 1181, 3229, 669, 2717, 1693, 3741, 413, 2461, 1437, 3485, 925, 2973, 1949, 3997, 93, 2141, 1117, 3165, 605, 2653, 1629, 3677, 349, 2397, 1373, 3421, 861, 2909, 1885, 3933, 221, 2269, 1245, 3293, 733, 2781, 1757, 3805, 477, 2525, 1501, 3549, 989, 3037, 2013, 4061, 61, 2109, 1085, 3133, 573, 2621, 1597, 3645, 317, 2365, 1341, 3389, 829, 2877, 1853, 3901, 189, 2237, 1213, 3261, 701, 2749, 1725, 3773, 445, 2493, 1469, 3517, 957, 3005, 1981, 4029, 125, 2173, 1149, 3197, 637, 2685, 1661, 3709, 381, 2429, 1405, 3453, 893, 2941, 1917, 3965, 253, 2301, 1277, 3325, 765, 2813, 1789, 3837, 509, 2557, 1533, 3581, 1021, 3069, 2045, 4093, 3, 2051, 1027, 3075, 515, 2563, 1539, 3587, 259, 2307, 1283, 3331, 771, 2819, 1795, 3843, 131, 2179, 1155, 3203, 643, 2691, 1667, 3715, 387, 2435, 1411, 3459, 899, 2947, 1923, 3971, 67, 2115, 1091, 3139, 579, 2627, 1603, 3651, 323, 2371, 1347, 3395, 835, 2883, 1859, 3907, 195, 2243, 1219, 3267, 707, 2755, 1731, 3779, 451, 2499, 1475, 3523, 963, 3011, 1987, 4035, 35, 2083, 1059, 3107, 547, 2595, 1571, 3619, 291, 2339, 1315, 3363, 803, 2851, 1827, 3875, 163, 2211, 1187, 3235, 675, 2723, 1699, 3747, 419, 2467, 1443, 3491, 931, 2979, 1955, 4003, 99, 2147, 1123, 3171, 611, 2659, 1635, 3683, 355, 2403, 1379, 3427, 867, 2915, 1891, 3939, 227, 2275, 1251, 3299, 739, 2787, 1763, 3811, 483, 2531, 1507, 3555, 995, 3043, 2019, 4067, 19, 2067, 1043, 3091, 531, 2579, 1555, 3603, 275, 2323, 1299, 3347, 787, 2835, 1811, 3859, 147, 2195, 1171, 3219, 659, 2707, 1683, 3731, 403, 2451, 1427, 3475, 915, 2963, 1939, 3987, 83, 2131, 1107, 3155, 595, 2643, 1619, 3667, 339, 2387, 1363, 3411, 851, 2899, 1875, 3923, 211, 2259, 1235, 3283, 723, 2771, 1747, 3795, 467, 2515, 1491, 3539, 979, 3027, 2003, 4051, 51, 2099, 1075, 3123, 563, 2611, 1587, 3635, 307, 2355, 1331, 3379, 819, 2867, 1843, 3891, 179, 2227, 1203, 3251, 691, 2739, 1715, 3763, 435, 2483, 1459, 3507, 947, 2995, 1971, 4019, 115, 2163, 1139, 3187, 627, 2675, 1651, 3699, 371, 2419, 1395, 3443, 883, 2931, 1907, 3955, 243, 2291, 1267, 3315, 755, 2803, 1779, 3827, 499, 2547, 1523, 3571, 1011, 3059, 2035, 4083, 11, 2059, 1035, 3083, 523, 2571, 1547, 3595, 267, 2315, 1291, 3339, 779, 2827, 1803, 3851, 139, 2187, 1163, 3211, 651, 2699, 1675, 3723, 395, 2443, 1419, 3467, 907, 2955, 1931, 3979, 75, 2123, 1099, 3147, 587, 2635, 1611, 3659, 331, 2379, 1355, 3403, 843, 2891, 1867, 3915, 203, 2251, 1227, 3275, 715, 2763, 1739, 3787, 459, 2507, 1483, 3531, 971, 3019, 1995, 4043, 43, 2091, 1067, 3115, 555, 2603, 1579, 3627, 299, 2347, 1323, 3371, 811, 2859, 1835, 3883, 171, 2219, 1195, 3243, 683, 2731, 1707, 3755, 427, 2475, 1451, 3499, 939, 2987, 1963, 4011, 107, 2155, 1131, 3179, 619, 2667, 1643, 3691, 363, 2411, 1387, 3435, 875, 2923, 1899, 3947, 235, 2283, 1259, 3307, 747, 2795, 1771, 3819, 491, 2539, 1515, 3563, 1003, 3051, 2027, 4075, 27, 2075, 1051, 3099, 539, 2587, 1563, 3611, 283, 2331, 1307, 3355, 795, 2843, 1819, 3867, 155, 2203, 1179, 3227, 667, 2715, 1691, 3739, 411, 2459, 1435, 3483, 923, 2971, 1947, 3995, 91, 2139, 1115, 3163, 603, 2651, 1627, 3675, 347, 2395, 1371, 3419, 859, 2907, 1883, 3931, 219, 2267, 1243, 3291, 731, 2779, 1755, 3803, 475, 2523, 1499, 3547, 987, 3035, 2011, 4059, 59, 
2107, 1083, 3131, 571, 2619, 1595, 3643, 315, 2363, 1339, 3387, 827, 2875, 1851, 3899, 187, 2235, 1211, 3259, 699, 2747, 1723, 3771, 443, 2491, 1467, 3515, 955, 3003, 1979, 4027, 123, 2171, 1147, 3195, 635, 2683, 1659, 3707, 379, 2427, 1403, 3451, 891, 2939, 1915, 3963, 251, 2299, 1275, 3323, 763, 2811, 1787, 3835, 507, 2555, 1531, 3579, 1019, 3067, 2043, 4091, 7, 2055, 1031, 3079, 519, 2567, 1543, 3591, 263, 2311, 1287, 3335, 775, 2823, 1799, 3847, 135, 2183, 1159, 3207, 647, 2695, 1671, 3719, 391, 2439, 1415, 3463, 903, 2951, 1927, 3975, 71, 2119, 1095, 3143, 583, 2631, 1607, 3655, 327, 2375, 1351, 3399, 839, 2887, 1863, 3911, 199, 2247, 1223, 3271, 711, 2759, 1735, 3783, 455, 2503, 1479, 3527, 967, 3015, 1991, 4039, 39, 2087, 1063, 3111, 551, 2599, 1575, 3623, 295, 2343, 1319, 3367, 807, 2855, 1831, 3879, 167, 2215, 1191, 3239, 679, 2727, 1703, 3751, 423, 2471, 1447, 3495, 935, 2983, 1959, 4007, 103, 2151, 1127, 3175, 615, 2663, 1639, 3687, 359, 2407, 1383, 3431, 871, 2919, 1895, 3943, 231, 2279, 1255, 3303, 743, 2791, 1767, 3815, 487, 2535, 1511, 3559, 999, 3047, 2023, 4071, 23, 2071, 1047, 3095, 535, 2583, 1559, 3607, 279, 2327, 1303, 3351, 791, 2839, 1815, 3863, 151, 2199, 1175, 3223, 663, 2711, 1687, 3735, 407, 2455, 1431, 3479, 919, 2967, 1943, 3991, 87, 2135, 1111, 3159, 599, 2647, 1623, 3671, 343, 2391, 1367, 3415, 855, 2903, 1879, 3927, 215, 2263, 1239, 3287, 727, 2775, 1751, 3799, 471, 2519, 1495, 3543, 983, 3031, 2007, 4055, 55, 2103, 1079, 3127, 567, 2615, 1591, 3639, 311, 2359, 1335, 3383, 823, 2871, 1847, 3895, 183, 2231, 1207, 3255, 695, 2743, 1719, 3767, 439, 2487, 1463, 3511, 951, 2999, 1975, 4023, 119, 2167, 1143, 3191, 631, 2679, 1655, 3703, 375, 2423, 1399, 3447, 887, 2935, 1911, 3959, 247, 2295, 1271, 3319, 759, 2807, 1783, 3831, 503, 2551, 1527, 3575, 1015, 3063, 2039, 4087, 15, 2063, 1039, 3087, 527, 2575, 1551, 3599, 271, 2319, 1295, 3343, 783, 2831, 1807, 3855, 143, 2191, 1167, 3215, 655, 2703, 1679, 3727, 399, 2447, 1423, 3471, 911, 2959, 1935, 3983, 79, 2127, 1103, 3151, 591, 2639, 1615, 3663, 335, 2383, 1359, 3407, 847, 2895, 1871, 3919, 207, 2255, 1231, 3279, 719, 2767, 1743, 3791, 463, 2511, 1487, 3535, 975, 3023, 1999, 4047, 47, 2095, 1071, 3119, 559, 2607, 1583, 3631, 303, 2351, 1327, 3375, 815, 2863, 1839, 3887, 175, 2223, 1199, 3247, 687, 2735, 1711, 3759, 431, 2479, 1455, 3503, 943, 2991, 1967, 4015, 111, 2159, 1135, 3183, 623, 2671, 1647, 3695, 367, 2415, 1391, 3439, 879, 2927, 1903, 3951, 239, 2287, 1263, 3311, 751, 2799, 1775, 3823, 495, 2543, 1519, 3567, 1007, 3055, 2031, 4079, 31, 2079, 1055, 3103, 543, 2591, 1567, 3615, 287, 2335, 1311, 3359, 799, 2847, 1823, 3871, 159, 2207, 1183, 3231, 671, 2719, 1695, 3743, 415, 2463, 1439, 3487, 927, 2975, 1951, 3999, 95, 2143, 1119, 3167, 607, 2655, 1631, 3679, 351, 2399, 1375, 3423, 863, 2911, 1887, 3935, 223, 2271, 1247, 3295, 735, 2783, 1759, 3807, 479, 2527, 1503, 3551, 991, 3039, 2015, 4063, 63, 2111, 1087, 3135, 575, 2623, 1599, 3647, 319, 2367, 1343, 3391, 831, 2879, 1855, 3903, 191, 2239, 1215, 3263, 703, 2751, 1727, 3775, 447, 2495, 1471, 3519, 959, 3007, 1983, 4031, 127, 2175, 1151, 3199, 639, 2687, 1663, 3711, 383, 2431, 1407, 3455, 895, 2943, 1919, 3967, 255, 2303, 1279, 3327, 767, 2815, 1791, 3839, 511, 2559, 1535, 3583, 1023, 3071, 2047, 4095}; int *get_indices_gpu; cudaMalloc((void**)&get_indices_gpu, n * sizeof(int)); switch(n){ case 1 : cudaMemcpy(get_indices_gpu, get_indices1, n * sizeof(int), cudaMemcpyHostToDevice); break; case 2 : cudaMemcpy(get_indices_gpu, get_indices2, n * sizeof(int), 
cudaMemcpyHostToDevice); break; case 4 : cudaMemcpy(get_indices_gpu, get_indices4, n * sizeof(int), cudaMemcpyHostToDevice); break; case 8 : cudaMemcpy(get_indices_gpu, get_indices8, n * sizeof(int), cudaMemcpyHostToDevice); break; case 16 : cudaMemcpy(get_indices_gpu, get_indices16, n * sizeof(int), cudaMemcpyHostToDevice); break; case 32 : cudaMemcpy(get_indices_gpu, get_indices32, n * sizeof(int), cudaMemcpyHostToDevice); break; case 64 : cudaMemcpy(get_indices_gpu, get_indices64, n * sizeof(int), cudaMemcpyHostToDevice); break; case 128 : cudaMemcpy(get_indices_gpu, get_indices128, n * sizeof(int), cudaMemcpyHostToDevice); break; case 256 : cudaMemcpy(get_indices_gpu, get_indices256, n * sizeof(int), cudaMemcpyHostToDevice); break; case 512 : cudaMemcpy(get_indices_gpu, get_indices512, n * sizeof(int), cudaMemcpyHostToDevice); break; case 1024 : cudaMemcpy(get_indices_gpu, get_indices1024, n * sizeof(int), cudaMemcpyHostToDevice); break; case 2048 : cudaMemcpy(get_indices_gpu, get_indices2048, n * sizeof(int), cudaMemcpyHostToDevice); break; case 4096 : cudaMemcpy(get_indices_gpu, get_indices4096, n * sizeof(int), cudaMemcpyHostToDevice); break; } uint64_t*result; cudaMalloc((void**)&result, size); // set a place to save the result if (n<=1024) { bit_reverse_gpu<<<batch, n>>>(vec, result, get_indices_gpu, n, batch); } else { bit_reverse_gpu<<<n*batch/1024, 1024>>>(vec, result, get_indices_gpu, n, batch); } cudaFree(get_indices_gpu); return result; }
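The file above ends by selecting one of the precomputed bit-reversal permutation tables (sizes 1 through 4096), copying it to the device, and launching bit_reverse_gpu with it. As an editorial aside, the same permutation index can also be computed on the fly with the 32-bit __brev device intrinsic, which avoids the host-side tables entirely. The sketch below is a hypothetical alternative, not the kernel used above; its name, signature, uint64_t element type, and log2n parameter are assumptions drawn from the call site.

#include <cstdint>

__global__ void bit_reverse_on_the_fly(const uint64_t *in, uint64_t *out,
                                       int log2n, int n, int batch)
{
    // One thread per element across the whole batch.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n * batch) return;

    int b = tid / n;   // which transform in the batch
    int i = tid % n;   // index within that transform

    // __brev reverses all 32 bits; shifting keeps only the low log2n bits.
    // Guard log2n == 0 (n == 1), where the permutation is the identity.
    int rev = (log2n > 0) ? (int)(__brev((unsigned int)i) >> (32 - log2n)) : 0;

    // Bit reversal is an involution, so gathering or scattering by rev
    // yields the same permutation.
    out[b * n + rev] = in[b * n + i];
}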
fbb91162ab36f0e8e9bebb4d7c415402db27a2e8.hip
// !!! This is a file automatically generated by hipify!!!
/* Includes, system */
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define N 512

/* DEVICE CODE */
__global__ void suma_2_enteros(int *d1, int *d2, int *sum){
  *sum = *d1 + *d2;
}

/* HOST CODE*/
int main(int argc, char** argv) {
  int DeviceCount = 0,i;
  int *h_d1,*h_d2,*h_sum;
  int *d_d1,*d_d2,*d_sum;

  h_d1 = (int*)malloc(N * sizeof(h_d1[0]));
  h_d2 = (int*)malloc(N * sizeof(h_d2[0]));
  h_sum = (int*)malloc(N * sizeof(h_sum[0]));
  for (i=0;i<N;i++){h_d1[i]=i;h_d2[i]=10*i;h_sum[i]=0;}

  /* Initialize CUDA */
  if (hipInit(0) != 0){
    printf("ERROR de inicializacion\n");
    exit(0);
  }
  hipGetDeviceCount(&DeviceCount);
  if (DeviceCount == 0){
    printf("ERROR ningun dispositivo soporta CUDA\n");
    exit(0);
  }

  hipMalloc((void**)&d_d1,N*sizeof(d_d1));hipMemset(d_d1,0,N*sizeof(d_d1));
  hipMalloc((void**)&d_d2,N*sizeof(d_d2));hipMemset(d_d2,0,N*sizeof(d_d2));
  hipMalloc((void**)&d_sum,N*sizeof(d_sum));hipMemset(d_sum,0,N*sizeof(d_sum));

  hipMemcpy(d_d1,h_d1,N*sizeof(h_d1[0]),hipMemcpyHostToDevice);
  hipMemcpy(d_d2,h_d2,N*sizeof(h_d2[0]),hipMemcpyHostToDevice);

  hipLaunchKernelGGL(( suma_2_enteros), dim3(1),dim3(512), 0, 0, d_d1,d_d2,d_sum);

  hipMemcpy(h_sum,d_sum,N*sizeof(h_sum[0]),hipMemcpyDeviceToHost);

  for (i=0;i<10;i++) printf("Resultado: %d \n",h_sum[i]);

  hipFree(d_d1);hipFree(d_d2);hipFree(d_sum);
}
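For readers comparing this HIP version with the CUDA original that follows, the main structural change hipify makes here (besides the hip* runtime calls) is the launch syntax: the triple-chevron form is rewritten into hipLaunchKernelGGL with explicit shared-memory and stream arguments. Shown side by side purely as an illustrative note, both taken from this file pair:

// CUDA:  suma_2_enteros<<<1, 512>>>(d_d1, d_d2, d_sum);
// HIP:   hipLaunchKernelGGL(suma_2_enteros, dim3(1), dim3(512),
//                           0 /*sharedMemBytes*/, 0 /*stream*/,
//                           d_d1, d_d2, d_sum);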
fbb91162ab36f0e8e9bebb4d7c415402db27a2e8.cu
/* Includes, system */
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 512

/* DEVICE CODE */
__global__ void suma_2_enteros(int *d1, int *d2, int *sum){
  *sum = *d1 + *d2;
}

/* HOST CODE*/
int main(int argc, char** argv) {
  int DeviceCount = 0,i;
  int *h_d1,*h_d2,*h_sum;
  int *d_d1,*d_d2,*d_sum;

  h_d1 = (int*)malloc(N * sizeof(h_d1[0]));
  h_d2 = (int*)malloc(N * sizeof(h_d2[0]));
  h_sum = (int*)malloc(N * sizeof(h_sum[0]));
  for (i=0;i<N;i++){h_d1[i]=i;h_d2[i]=10*i;h_sum[i]=0;}

  /* Initialize CUDA */
  if (cuInit(0) != 0){
    printf("ERROR de inicializacion\n");
    exit(0);
  }
  cuDeviceGetCount(&DeviceCount);
  if (DeviceCount == 0){
    printf("ERROR ningun dispositivo soporta CUDA\n");
    exit(0);
  }

  cudaMalloc((void**)&d_d1,N*sizeof(d_d1));cudaMemset(d_d1,0,N*sizeof(d_d1));
  cudaMalloc((void**)&d_d2,N*sizeof(d_d2));cudaMemset(d_d2,0,N*sizeof(d_d2));
  cudaMalloc((void**)&d_sum,N*sizeof(d_sum));cudaMemset(d_sum,0,N*sizeof(d_sum));

  cudaMemcpy(d_d1,h_d1,N*sizeof(h_d1[0]),cudaMemcpyHostToDevice);
  cudaMemcpy(d_d2,h_d2,N*sizeof(h_d2[0]),cudaMemcpyHostToDevice);

  suma_2_enteros<<<1,512>>>(d_d1,d_d2,d_sum);

  cudaMemcpy(h_sum,d_sum,N*sizeof(h_sum[0]),cudaMemcpyDeviceToHost);

  for (i=0;i<10;i++) printf("Resultado: %d \n",h_sum[i]);

  cudaFree(d_d1);cudaFree(d_d2);cudaFree(d_sum);
}
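A note on this pair: suma_2_enteros dereferences its pointers as scalars, so every one of the 512 launched threads writes the same sum[0], and only the first element of h_sum ever receives a result even though the host allocates and prints N-element arrays. A per-element variant is shown below purely as an editorial sketch; suma_vectores is a hypothetical name and does not appear in either file.

__global__ void suma_vectores(const int *d1, const int *d2, int *sum, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // one output element per thread
    if (i < n)
        sum[i] = d1[i] + d2[i];
}

// Host-side launch matching the N = 512 setup above:
//   suma_vectores<<<1, 512>>>(d_d1, d_d2, d_sum, N);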
d8c0f4ffefba3ad158f4b1bc7604b65a1b55c9ce.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // RUN: %clang_cc1 -std=c++11 -fcuda-is-device -fsyntax-only -verify=dev %s // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify=host %s // host-no-diagnostics #include "Inputs/cuda.h" int func(); struct A { int x; static int host_var; }; int A::host_var; // dev-note {{host variable declared here}} namespace X { int host_var; // dev-note {{host variable declared here}} } // struct with non-empty ctor. struct B1 { int x; B1() { x = 1; } }; // struct with non-empty dtor. struct B2 { int x; B2() {} ~B2() { x = 0; } }; static int static_host_var; // dev-note {{host variable declared here}} __device__ int global_dev_var; __constant__ int global_constant_var; __shared__ int global_shared_var; int global_host_var; // dev-note 8{{host variable declared here}} const int global_const_var = 1; constexpr int global_constexpr_var = 1; int global_host_array[2] = {1, 2}; // dev-note {{host variable declared here}} const int global_const_array[2] = {1, 2}; constexpr int global_constexpr_array[2] = {1, 2}; A global_host_struct_var{1}; // dev-note 2{{host variable declared here}} const A global_const_struct_var{1}; constexpr A global_constexpr_struct_var{1}; // Check const host var initialized with non-empty ctor is not allowed in // device function. const B1 b1; // dev-note {{const variable cannot be emitted on device side due to dynamic initialization}} // Check const host var having non-empty dtor is not allowed in device function. const B2 b2; // dev-note {{const variable cannot be emitted on device side due to dynamic initialization}} // Check const host var initialized by non-constant initializer is not allowed // in device function. const int b3 = func(); // dev-note {{const variable cannot be emitted on device side due to dynamic initialization}} template<typename F> __global__ void kernel(F f) { f(); } // dev-note2 {{called by 'kernel<(lambda}} __device__ void dev_fun(int *out) { // Check access device variables are allowed. int &ref_dev_var = global_dev_var; int &ref_constant_var = global_constant_var; int &ref_shared_var = global_shared_var; *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = global_dev_var; *out = global_constant_var; *out = global_shared_var; // Check access of non-const host variables are not allowed. *out = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __device__ function}} *out = global_const_var; *out = global_constexpr_var; *out = b1.x; // dev-error {{reference to __host__ variable 'b1' in __device__ function}} *out = b2.x; // dev-error {{reference to __host__ variable 'b2' in __device__ function}} *out = b3; // dev-error {{reference to __host__ variable 'b3' in __device__ function}} global_host_var = 1; // dev-error {{reference to __host__ variable 'global_host_var' in __device__ function}} // Check reference of non-constexpr host variables are not allowed. int &ref_host_var = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __device__ function}} const int &ref_const_var = global_const_var; const int &ref_constexpr_var = global_constexpr_var; *out = ref_host_var; *out = ref_constexpr_var; *out = ref_const_var; // Check access member of non-constexpr struct type host variable is not allowed. 
*out = global_host_struct_var.x; // dev-error {{reference to __host__ variable 'global_host_struct_var' in __device__ function}} *out = global_const_struct_var.x; *out = global_constexpr_struct_var.x; global_host_struct_var.x = 1; // dev-error {{reference to __host__ variable 'global_host_struct_var' in __device__ function}} // Check address taking of non-constexpr host variables is not allowed. int *p = &global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __device__ function}} const int *cp = &global_const_var; const int *cp2 = &global_constexpr_var; // Check access elements of non-constexpr host array is not allowed. *out = global_host_array[1]; // dev-error {{reference to __host__ variable 'global_host_array' in __device__ function}} *out = global_const_array[1]; *out = global_constexpr_array[1]; // Check ODR-use of host variables in namespace is not allowed. *out = X::host_var; // dev-error {{reference to __host__ variable 'host_var' in __device__ function}} // Check ODR-use of static host varables in class or file scope is not allowed. *out = A::host_var; // dev-error {{reference to __host__ variable 'host_var' in __device__ function}} *out = static_host_var; // dev-error {{reference to __host__ variable 'static_host_var' in __device__ function}} // Check function-scope static variable is allowed. static int static_var; *out = static_var; // Check non-ODR use of host varirables are allowed. *out = sizeof(global_host_var); *out = sizeof(global_host_struct_var.x); decltype(global_host_var) var1; decltype(global_host_struct_var.x) var2; } __global__ void global_fun(int *out) { int &ref_host_var = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __global__ function}} int &ref_dev_var = global_dev_var; int &ref_constant_var = global_constant_var; int &ref_shared_var = global_shared_var; const int &ref_constexpr_var = global_constexpr_var; const int &ref_const_var = global_const_var; *out = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __global__ function}} *out = global_dev_var; *out = global_constant_var; *out = global_shared_var; *out = global_constexpr_var; *out = global_const_var; *out = ref_host_var; *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = ref_constexpr_var; *out = ref_const_var; } __host__ __device__ void host_dev_fun(int *out) { int &ref_host_var = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __host__ __device__ function}} int &ref_dev_var = global_dev_var; int &ref_constant_var = global_constant_var; int &ref_shared_var = global_shared_var; const int &ref_constexpr_var = global_constexpr_var; const int &ref_const_var = global_const_var; *out = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __host__ __device__ function}} *out = global_dev_var; *out = global_constant_var; *out = global_shared_var; *out = global_constexpr_var; *out = global_const_var; *out = ref_host_var; *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = ref_constexpr_var; *out = ref_const_var; } inline __host__ __device__ void inline_host_dev_fun(int *out) { int &ref_host_var = global_host_var; int &ref_dev_var = global_dev_var; int &ref_constant_var = global_constant_var; int &ref_shared_var = global_shared_var; const int &ref_constexpr_var = global_constexpr_var; const int &ref_const_var = global_const_var; *out = global_host_var; *out = global_dev_var; *out = 
global_constant_var; *out = global_shared_var; *out = global_constexpr_var; *out = global_const_var; *out = ref_host_var; *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = ref_constexpr_var; *out = ref_const_var; } void dev_lambda_capture_by_ref(int *out) { int &ref_host_var = global_host_var; hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, [&]() { int &ref_dev_var = global_dev_var; int &ref_constant_var = global_constant_var; int &ref_shared_var = global_shared_var; const int &ref_constexpr_var = global_constexpr_var; const int &ref_const_var = global_const_var; *out = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __host__ __device__ function}} // dev-error@-1 {{capture host variable 'out' by reference in device or host device lambda function}} *out = global_dev_var; *out = global_constant_var; *out = global_shared_var; *out = global_constexpr_var; *out = global_const_var; *out = ref_host_var; // dev-error {{capture host variable 'ref_host_var' by reference in device or host device lambda function}} *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = ref_constexpr_var; *out = ref_const_var; }); } void dev_lambda_capture_by_copy(int *out) { int &ref_host_var = global_host_var; hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, [=]() { int &ref_dev_var = global_dev_var; int &ref_constant_var = global_constant_var; int &ref_shared_var = global_shared_var; const int &ref_constexpr_var = global_constexpr_var; const int &ref_const_var = global_const_var; *out = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __host__ __device__ function}} *out = global_dev_var; *out = global_constant_var; *out = global_shared_var; *out = global_constexpr_var; *out = global_const_var; *out = ref_host_var; *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = ref_constexpr_var; *out = ref_const_var; }); } // Texture references are special. As far as C++ is concerned they are host // variables that are referenced from device code. However, they are handled // very differently by the compiler under the hood and such references are // allowed. Compiler should produce no warning here, but it should diagnose the // same case without the device_builtin_texture_type attribute. template <class, int = 1, int = 1> struct __attribute__((device_builtin_texture_type)) texture { static texture<int> ref; __device__ void c() { auto &x = ref; } }; template <class, int = 1, int = 1> struct not_a_texture { static not_a_texture<int> ref; __device__ void c() { auto &x = ref; // dev-error {{reference to __host__ variable 'ref' in __device__ function}} } }; template<> not_a_texture<int> not_a_texture<int>::ref; // dev-note {{host variable declared here}} __device__ void test_not_a_texture() { not_a_texture<int> inst; inst.c(); // dev-note {{in instantiation of member function 'not_a_texture<int, 1, 1>::c' requested here}} } // Test static variable in host function used by device function. void test_static_var_host() { for (int i = 0; i < 10; i++) { static int x; // dev-note {{host variable declared here}} struct A { __device__ int f() { return x; // dev-error{{reference to __host__ variable 'x' in __device__ function}} } }; } } // Test static variable in device function used by device function. __device__ void test_static_var_device() { for (int i = 0; i < 10; i++) { static int x; int y = x; struct A { __device__ int f() { return x; } }; } }
d8c0f4ffefba3ad158f4b1bc7604b65a1b55c9ce.cu
// RUN: %clang_cc1 -std=c++11 -fcuda-is-device -fsyntax-only -verify=dev %s // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify=host %s // host-no-diagnostics #include "Inputs/cuda.h" int func(); struct A { int x; static int host_var; }; int A::host_var; // dev-note {{host variable declared here}} namespace X { int host_var; // dev-note {{host variable declared here}} } // struct with non-empty ctor. struct B1 { int x; B1() { x = 1; } }; // struct with non-empty dtor. struct B2 { int x; B2() {} ~B2() { x = 0; } }; static int static_host_var; // dev-note {{host variable declared here}} __device__ int global_dev_var; __constant__ int global_constant_var; __shared__ int global_shared_var; int global_host_var; // dev-note 8{{host variable declared here}} const int global_const_var = 1; constexpr int global_constexpr_var = 1; int global_host_array[2] = {1, 2}; // dev-note {{host variable declared here}} const int global_const_array[2] = {1, 2}; constexpr int global_constexpr_array[2] = {1, 2}; A global_host_struct_var{1}; // dev-note 2{{host variable declared here}} const A global_const_struct_var{1}; constexpr A global_constexpr_struct_var{1}; // Check const host var initialized with non-empty ctor is not allowed in // device function. const B1 b1; // dev-note {{const variable cannot be emitted on device side due to dynamic initialization}} // Check const host var having non-empty dtor is not allowed in device function. const B2 b2; // dev-note {{const variable cannot be emitted on device side due to dynamic initialization}} // Check const host var initialized by non-constant initializer is not allowed // in device function. const int b3 = func(); // dev-note {{const variable cannot be emitted on device side due to dynamic initialization}} template<typename F> __global__ void kernel(F f) { f(); } // dev-note2 {{called by 'kernel<(lambda}} __device__ void dev_fun(int *out) { // Check access device variables are allowed. int &ref_dev_var = global_dev_var; int &ref_constant_var = global_constant_var; int &ref_shared_var = global_shared_var; *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = global_dev_var; *out = global_constant_var; *out = global_shared_var; // Check access of non-const host variables are not allowed. *out = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __device__ function}} *out = global_const_var; *out = global_constexpr_var; *out = b1.x; // dev-error {{reference to __host__ variable 'b1' in __device__ function}} *out = b2.x; // dev-error {{reference to __host__ variable 'b2' in __device__ function}} *out = b3; // dev-error {{reference to __host__ variable 'b3' in __device__ function}} global_host_var = 1; // dev-error {{reference to __host__ variable 'global_host_var' in __device__ function}} // Check reference of non-constexpr host variables are not allowed. int &ref_host_var = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __device__ function}} const int &ref_const_var = global_const_var; const int &ref_constexpr_var = global_constexpr_var; *out = ref_host_var; *out = ref_constexpr_var; *out = ref_const_var; // Check access member of non-constexpr struct type host variable is not allowed. 
*out = global_host_struct_var.x; // dev-error {{reference to __host__ variable 'global_host_struct_var' in __device__ function}} *out = global_const_struct_var.x; *out = global_constexpr_struct_var.x; global_host_struct_var.x = 1; // dev-error {{reference to __host__ variable 'global_host_struct_var' in __device__ function}} // Check address taking of non-constexpr host variables is not allowed. int *p = &global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __device__ function}} const int *cp = &global_const_var; const int *cp2 = &global_constexpr_var; // Check access elements of non-constexpr host array is not allowed. *out = global_host_array[1]; // dev-error {{reference to __host__ variable 'global_host_array' in __device__ function}} *out = global_const_array[1]; *out = global_constexpr_array[1]; // Check ODR-use of host variables in namespace is not allowed. *out = X::host_var; // dev-error {{reference to __host__ variable 'host_var' in __device__ function}} // Check ODR-use of static host varables in class or file scope is not allowed. *out = A::host_var; // dev-error {{reference to __host__ variable 'host_var' in __device__ function}} *out = static_host_var; // dev-error {{reference to __host__ variable 'static_host_var' in __device__ function}} // Check function-scope static variable is allowed. static int static_var; *out = static_var; // Check non-ODR use of host varirables are allowed. *out = sizeof(global_host_var); *out = sizeof(global_host_struct_var.x); decltype(global_host_var) var1; decltype(global_host_struct_var.x) var2; } __global__ void global_fun(int *out) { int &ref_host_var = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __global__ function}} int &ref_dev_var = global_dev_var; int &ref_constant_var = global_constant_var; int &ref_shared_var = global_shared_var; const int &ref_constexpr_var = global_constexpr_var; const int &ref_const_var = global_const_var; *out = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __global__ function}} *out = global_dev_var; *out = global_constant_var; *out = global_shared_var; *out = global_constexpr_var; *out = global_const_var; *out = ref_host_var; *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = ref_constexpr_var; *out = ref_const_var; } __host__ __device__ void host_dev_fun(int *out) { int &ref_host_var = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __host__ __device__ function}} int &ref_dev_var = global_dev_var; int &ref_constant_var = global_constant_var; int &ref_shared_var = global_shared_var; const int &ref_constexpr_var = global_constexpr_var; const int &ref_const_var = global_const_var; *out = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __host__ __device__ function}} *out = global_dev_var; *out = global_constant_var; *out = global_shared_var; *out = global_constexpr_var; *out = global_const_var; *out = ref_host_var; *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = ref_constexpr_var; *out = ref_const_var; } inline __host__ __device__ void inline_host_dev_fun(int *out) { int &ref_host_var = global_host_var; int &ref_dev_var = global_dev_var; int &ref_constant_var = global_constant_var; int &ref_shared_var = global_shared_var; const int &ref_constexpr_var = global_constexpr_var; const int &ref_const_var = global_const_var; *out = global_host_var; *out = global_dev_var; *out = 
global_constant_var; *out = global_shared_var; *out = global_constexpr_var; *out = global_const_var; *out = ref_host_var; *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = ref_constexpr_var; *out = ref_const_var; } void dev_lambda_capture_by_ref(int *out) { int &ref_host_var = global_host_var; kernel<<<1,1>>>([&]() { int &ref_dev_var = global_dev_var; int &ref_constant_var = global_constant_var; int &ref_shared_var = global_shared_var; const int &ref_constexpr_var = global_constexpr_var; const int &ref_const_var = global_const_var; *out = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __host__ __device__ function}} // dev-error@-1 {{capture host variable 'out' by reference in device or host device lambda function}} *out = global_dev_var; *out = global_constant_var; *out = global_shared_var; *out = global_constexpr_var; *out = global_const_var; *out = ref_host_var; // dev-error {{capture host variable 'ref_host_var' by reference in device or host device lambda function}} *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = ref_constexpr_var; *out = ref_const_var; }); } void dev_lambda_capture_by_copy(int *out) { int &ref_host_var = global_host_var; kernel<<<1,1>>>([=]() { int &ref_dev_var = global_dev_var; int &ref_constant_var = global_constant_var; int &ref_shared_var = global_shared_var; const int &ref_constexpr_var = global_constexpr_var; const int &ref_const_var = global_const_var; *out = global_host_var; // dev-error {{reference to __host__ variable 'global_host_var' in __host__ __device__ function}} *out = global_dev_var; *out = global_constant_var; *out = global_shared_var; *out = global_constexpr_var; *out = global_const_var; *out = ref_host_var; *out = ref_dev_var; *out = ref_constant_var; *out = ref_shared_var; *out = ref_constexpr_var; *out = ref_const_var; }); } // Texture references are special. As far as C++ is concerned they are host // variables that are referenced from device code. However, they are handled // very differently by the compiler under the hood and such references are // allowed. Compiler should produce no warning here, but it should diagnose the // same case without the device_builtin_texture_type attribute. template <class, int = 1, int = 1> struct __attribute__((device_builtin_texture_type)) texture { static texture<int> ref; __device__ void c() { auto &x = ref; } }; template <class, int = 1, int = 1> struct not_a_texture { static not_a_texture<int> ref; __device__ void c() { auto &x = ref; // dev-error {{reference to __host__ variable 'ref' in __device__ function}} } }; template<> not_a_texture<int> not_a_texture<int>::ref; // dev-note {{host variable declared here}} __device__ void test_not_a_texture() { not_a_texture<int> inst; inst.c(); // dev-note {{in instantiation of member function 'not_a_texture<int, 1, 1>::c' requested here}} } // Test static variable in host function used by device function. void test_static_var_host() { for (int i = 0; i < 10; i++) { static int x; // dev-note {{host variable declared here}} struct A { __device__ int f() { return x; // dev-error{{reference to __host__ variable 'x' in __device__ function}} } }; } } // Test static variable in device function used by device function. __device__ void test_static_var_device() { for (int i = 0; i < 10; i++) { static int x; int y = x; struct A { __device__ int f() { return x; } }; } }
e453485914788181b26a970a7a7cefa4f6927f32.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "common.h" #include <stdio.h> #include <assert.h> #include <thrust/functional.h> __global__ void cunn_SpatialClassNLLCriterion_updateOutput_kernel( float *output, float *total_weight, float *input, long *target, float *weights, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { __shared__ float partial_sums[CUDA_NUM_THREADS]; int i, t; float cur_weight; float input_sum = 0; float acc_weight = 0; int sample = blockIdx.x / blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; int step = blockDim.x * blocks_per_sample; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = target[toffset + i] - TH_INDEX_BASE; assert(t >= 0 && t < n_classes); cur_weight = weights ? weights[t] : 1.0f; input_sum -= input[ioffset + i + map_nelem * t] * cur_weight; acc_weight += cur_weight; } __syncthreads(); input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<float>(), 0.0f); acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<float>(), 0.0f); if (threadIdx.x == 0) { atomicAdd(total_weight, acc_weight); if (size_average && acc_weight > 0) atomicAdd(output, input_sum / acc_weight / gridDim.x); else atomicAdd(output, input_sum); } } __global__ void cunn_SpatialClassNLLCriterion_updateGradInput_kernel( float *gradInput, long *target, float *weights, float *total_weight, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { if (*total_weight <= 0) return; int i, t; float norm = size_average ? (1.0f / *total_weight) : 1.0f; int sample = blockIdx.x / blocks_per_sample; int step = blockDim.x * blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = (int)target[toffset + i] - TH_INDEX_BASE; assert(t >= 0 && t < n_classes); gradInput[ioffset + i + map_nelem * t] = -(weights ? weights[t] : 1.0f) * norm; } } void THNN_CudaSpatialClassNLLCriterion_updateOutput( THCState *state, THCudaTensor *input, THCudaLongTensor *target, THCudaTensor *output, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaLongTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); if (weights && THCudaTensor_nElement(state, weights) != THCudaTensor_size(state, input, 1)) { THError("weight tensor should be defined either for all or no classes"); } if (weights) THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, output, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaLongTensor_newContiguous(state, target); float *input_data = THCudaTensor_data(state, input); float *weights_data = weights ? 
THCudaTensor_data(state, weights) : NULL; long *target_data = THCudaLongTensor_data(state, target); float *output_data = THCudaTensor_data(state, output); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaLongTensor_size(state, target, 0); ptrdiff_t map_nelem = THCudaLongTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; THCudaTensor_fill(state, output, 0); THCudaTensor_fill(state, total_weight, 0); hipLaunchKernelGGL(( cunn_SpatialClassNLLCriterion_updateOutput_kernel) , dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), output_data, total_weight_data, input_data, target_data, weights_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), blocks_per_sample ); THCudaCheck(hipGetLastError()); if (weights) THCudaTensor_free(state, weights); THCudaLongTensor_free(state, target); THCudaTensor_free(state, input); } void THNN_CudaSpatialClassNLLCriterion_updateGradInput( THCState *state, THCudaTensor *input, THCudaLongTensor *target, THCudaTensor *gradInput, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaLongTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); THArgCheck(THCudaTensor_isContiguous(state, gradInput), 4, "gradInput must be contiguous"); if (weights && THCudaTensor_nElement(state, weights) != THCudaTensor_size(state, input, 1)) { THError("weight tensor should be defined either for all or no classes"); } if (weights) THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaLongTensor_newContiguous(state, target); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; float *gradInput_data = THCudaTensor_data(state, gradInput); long *target_data = THCudaLongTensor_data(state, target); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaLongTensor_size(state, target, 0); ptrdiff_t map_nelem = THCudaLongTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; hipLaunchKernelGGL(( cunn_SpatialClassNLLCriterion_updateGradInput_kernel) , dim3(total_blocks), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state), gradInput_data, target_data, weights_data, total_weight_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) *THCudaTensor_size(state, input, 3), blocks_per_sample ); THCudaCheck(hipGetLastError()); if (weights) THCudaTensor_free(state, weights); THCudaLongTensor_free(state, target); THCudaTensor_free(state, input); }
e453485914788181b26a970a7a7cefa4f6927f32.cu
#include "THCUNN.h" #include "common.h" #include <stdio.h> #include <assert.h> #include <thrust/functional.h> __global__ void cunn_SpatialClassNLLCriterion_updateOutput_kernel( float *output, float *total_weight, float *input, long *target, float *weights, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { __shared__ float partial_sums[CUDA_NUM_THREADS]; int i, t; float cur_weight; float input_sum = 0; float acc_weight = 0; int sample = blockIdx.x / blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; int step = blockDim.x * blocks_per_sample; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = target[toffset + i] - TH_INDEX_BASE; assert(t >= 0 && t < n_classes); cur_weight = weights ? weights[t] : 1.0f; input_sum -= input[ioffset + i + map_nelem * t] * cur_weight; acc_weight += cur_weight; } __syncthreads(); input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<float>(), 0.0f); acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<float>(), 0.0f); if (threadIdx.x == 0) { atomicAdd(total_weight, acc_weight); if (size_average && acc_weight > 0) atomicAdd(output, input_sum / acc_weight / gridDim.x); else atomicAdd(output, input_sum); } } __global__ void cunn_SpatialClassNLLCriterion_updateGradInput_kernel( float *gradInput, long *target, float *weights, float *total_weight, int size_average, int batch_size, int n_classes, int map_nelem, int blocks_per_sample) { if (*total_weight <= 0) return; int i, t; float norm = size_average ? (1.0f / *total_weight) : 1.0f; int sample = blockIdx.x / blocks_per_sample; int step = blockDim.x * blocks_per_sample; int toffset = sample * map_nelem; int ioffset = sample * map_nelem * n_classes; for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) { t = (int)target[toffset + i] - TH_INDEX_BASE; assert(t >= 0 && t < n_classes); gradInput[ioffset + i + map_nelem * t] = -(weights ? weights[t] : 1.0f) * norm; } } void THNN_CudaSpatialClassNLLCriterion_updateOutput( THCState *state, THCudaTensor *input, THCudaLongTensor *target, THCudaTensor *output, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaLongTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); if (weights && THCudaTensor_nElement(state, weights) != THCudaTensor_size(state, input, 1)) { THError("weight tensor should be defined either for all or no classes"); } if (weights) THCUNN_assertSameGPU(state, 5, input, target, weights, output, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, output, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaLongTensor_newContiguous(state, target); float *input_data = THCudaTensor_data(state, input); float *weights_data = weights ? 
THCudaTensor_data(state, weights) : NULL; long *target_data = THCudaLongTensor_data(state, target); float *output_data = THCudaTensor_data(state, output); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaLongTensor_size(state, target, 0); ptrdiff_t map_nelem = THCudaLongTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; THCudaTensor_fill(state, output, 0); THCudaTensor_fill(state, total_weight, 0); cunn_SpatialClassNLLCriterion_updateOutput_kernel <<<total_blocks, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( output_data, total_weight_data, input_data, target_data, weights_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), blocks_per_sample ); THCudaCheck(cudaGetLastError()); if (weights) THCudaTensor_free(state, weights); THCudaLongTensor_free(state, target); THCudaTensor_free(state, input); } void THNN_CudaSpatialClassNLLCriterion_updateGradInput( THCState *state, THCudaTensor *input, THCudaLongTensor *target, THCudaTensor *gradInput, bool sizeAverage, THCudaTensor *weights, THCudaTensor *total_weight) { THArgCheck(THCudaLongTensor_nDimension(state, target) == 3, 1, "only batches of spatial targets supported (3D tensors)"); THArgCheck(THCudaTensor_nDimension(state, input) == 4, 2, "only batches of spatial inputs supported (4D tensors)"); THArgCheck(THCudaTensor_isContiguous(state, gradInput), 4, "gradInput must be contiguous"); if (weights && THCudaTensor_nElement(state, weights) != THCudaTensor_size(state, input, 1)) { THError("weight tensor should be defined either for all or no classes"); } if (weights) THCUNN_assertSameGPU(state, 5, weights, input, target, gradInput, total_weight); else THCUNN_assertSameGPU(state, 4, input, target, gradInput, total_weight); input = THCudaTensor_newContiguous(state, input); weights = weights ? THCudaTensor_newContiguous(state, weights) : NULL; target = THCudaLongTensor_newContiguous(state, target); float *weights_data = weights ? THCudaTensor_data(state, weights) : NULL; float *gradInput_data = THCudaTensor_data(state, gradInput); long *target_data = THCudaLongTensor_data(state, target); float *total_weight_data = THCudaTensor_data(state, total_weight); long batch_size = THCudaLongTensor_size(state, target, 0); ptrdiff_t map_nelem = THCudaLongTensor_nElement(state, target) / batch_size; int blocks_per_sample = GET_BLOCKS(map_nelem) / 128; blocks_per_sample = (blocks_per_sample == 0) ? 1 : blocks_per_sample; int total_blocks = blocks_per_sample * batch_size; cunn_SpatialClassNLLCriterion_updateGradInput_kernel <<<total_blocks, CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>( gradInput_data, target_data, weights_data, total_weight_data, sizeAverage, THCudaTensor_size(state, input, 0), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) *THCudaTensor_size(state, input, 3), blocks_per_sample ); THCudaCheck(cudaGetLastError()); if (weights) THCudaTensor_free(state, weights); THCudaLongTensor_free(state, target); THCudaTensor_free(state, input); }
0a46fefb6335e01ed82e00f5b02b075c7fe1b774.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>

#include "thrust/device_vector.h"

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
    const int spatial_dim, const Dtype* data, Dtype* out) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    Dtype maxval = -FLT_MAX;
    for (int c = 0; c < channels; ++c) {
      maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
    }
    out[index] = maxval;
  }
}

template <typename Dtype>
__global__ void kernel_channel_subtract(const int num, const int channels,
    const int spatial_dim, Dtype* data, const Dtype* channel_max) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    for (int c = 0; c < channels; ++c) {
      data[(n * channels + c) * spatial_dim + s] -= channel_max[index];
    }
  }
}

template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
  CUDA_KERNEL_LOOP(index, count) {
    out[index] = exp(data[index]);
  }
}

template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
    const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    Dtype sum = 0;
    for (int c = 0; c < channels; ++c) {
      sum += data[(n * channels + c) * spatial_dim + s];
    }
    channel_sum[index] = sum;
  }
}

template <typename Dtype>
__global__ void kernel_channel_div(const int num, const int channels,
    const int spatial_dim, Dtype* data, const Dtype* channel_sum) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    for (int c = 0; c < channels; ++c) {
      data[(n * channels + c) * spatial_dim + s] /= channel_sum[index];
    }
  }
}

template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
    const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
    Dtype* channel_dot) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    Dtype dot = 0;
    for (int c = 0; c < channels; ++c) {
      dot += (data_1[(n * channels + c) * spatial_dim + s]
          * data_2[(n * channels + c) * spatial_dim + s]);
    }
    channel_dot[index] = dot;
  }
}

template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  Dtype* scale_data = scale_.mutable_gpu_data();
  int num = bottom[0]->num();
  int channels = bottom[0]->channels();
  int spatial_dim = bottom[0]->height() * bottom[0]->width();
  caffe_copy(bottom[0]->count(), bottom_data, top_data);
  // We need to subtract the max to avoid numerical issues, compute the exp,
  // and then normalize.
  // compute max
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(num * spatial_dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num, channels, spatial_dim, top_data, scale_data);
  // subtract
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(num * spatial_dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num, channels, spatial_dim, top_data, scale_data);
  // exponentiate
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(num * channels * spatial_dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num * channels * spatial_dim, top_data, top_data);
  // sum after exp
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(num * spatial_dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num, channels, spatial_dim, top_data, scale_data);
  // divide
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(num * spatial_dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num, channels, spatial_dim, top_data, scale_data);
}

template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* top_data = top[0]->gpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  Dtype* scale_data = scale_.mutable_gpu_data();
  int num = top[0]->num();
  int channels = top[0]->channels();
  int spatial_dim = top[0]->height() * top[0]->width();
  caffe_copy(top[0]->count(), top_diff, bottom_diff);
  // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( kernel_channel_dot<Dtype>), dim3(CAFFE_GET_BLOCKS(num * spatial_dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num, channels, spatial_dim, top_diff, top_data, scale_data);
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(num * spatial_dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num, channels, spatial_dim, bottom_diff, scale_data);
  // elementwise multiplication
  caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
}

INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);

}  // namespace caffe
0a46fefb6335e01ed82e00f5b02b075c7fe1b774.cu
#include <algorithm>
#include <cfloat>
#include <vector>

#include "thrust/device_vector.h"

#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
    const int spatial_dim, const Dtype* data, Dtype* out) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    Dtype maxval = -FLT_MAX;
    for (int c = 0; c < channels; ++c) {
      maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
    }
    out[index] = maxval;
  }
}

template <typename Dtype>
__global__ void kernel_channel_subtract(const int num, const int channels,
    const int spatial_dim, Dtype* data, const Dtype* channel_max) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    for (int c = 0; c < channels; ++c) {
      data[(n * channels + c) * spatial_dim + s] -= channel_max[index];
    }
  }
}

template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
  CUDA_KERNEL_LOOP(index, count) {
    out[index] = exp(data[index]);
  }
}

template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
    const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    Dtype sum = 0;
    for (int c = 0; c < channels; ++c) {
      sum += data[(n * channels + c) * spatial_dim + s];
    }
    channel_sum[index] = sum;
  }
}

template <typename Dtype>
__global__ void kernel_channel_div(const int num, const int channels,
    const int spatial_dim, Dtype* data, const Dtype* channel_sum) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    for (int c = 0; c < channels; ++c) {
      data[(n * channels + c) * spatial_dim + s] /= channel_sum[index];
    }
  }
}

template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
    const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
    Dtype* channel_dot) {
  CUDA_KERNEL_LOOP(index, num * spatial_dim) {
    int n = index / spatial_dim;
    int s = index % spatial_dim;
    Dtype dot = 0;
    for (int c = 0; c < channels; ++c) {
      dot += (data_1[(n * channels + c) * spatial_dim + s]
          * data_2[(n * channels + c) * spatial_dim + s]);
    }
    channel_dot[index] = dot;
  }
}

template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  Dtype* scale_data = scale_.mutable_gpu_data();
  int num = bottom[0]->num();
  int channels = bottom[0]->channels();
  int spatial_dim = bottom[0]->height() * bottom[0]->width();
  caffe_copy(bottom[0]->count(), bottom_data, top_data);
  // We need to subtract the max to avoid numerical issues, compute the exp,
  // and then normalize.
  // compute max
  // NOLINT_NEXT_LINE(whitespace/operators)
  kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(num * spatial_dim),
      CAFFE_CUDA_NUM_THREADS>>>(num, channels, spatial_dim, top_data,
      scale_data);
  // subtract
  // NOLINT_NEXT_LINE(whitespace/operators)
  kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(num * spatial_dim),
      CAFFE_CUDA_NUM_THREADS>>>(num, channels, spatial_dim, top_data,
      scale_data);
  // exponentiate
  // NOLINT_NEXT_LINE(whitespace/operators)
  kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(num * channels * spatial_dim),
      CAFFE_CUDA_NUM_THREADS>>>(num * channels * spatial_dim, top_data,
      top_data);
  // sum after exp
  // NOLINT_NEXT_LINE(whitespace/operators)
  kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(num * spatial_dim),
      CAFFE_CUDA_NUM_THREADS>>>(num, channels, spatial_dim, top_data,
      scale_data);
  // divide
  // NOLINT_NEXT_LINE(whitespace/operators)
  kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(num * spatial_dim),
      CAFFE_CUDA_NUM_THREADS>>>(num, channels, spatial_dim, top_data,
      scale_data);
}

template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* top_data = top[0]->gpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  Dtype* scale_data = scale_.mutable_gpu_data();
  int num = top[0]->num();
  int channels = top[0]->channels();
  int spatial_dim = top[0]->height() * top[0]->width();
  caffe_copy(top[0]->count(), top_diff, bottom_diff);
  // Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
  // NOLINT_NEXT_LINE(whitespace/operators)
  kernel_channel_dot<Dtype><<<CAFFE_GET_BLOCKS(num * spatial_dim),
      CAFFE_CUDA_NUM_THREADS>>>(num, channels, spatial_dim, top_diff, top_data,
      scale_data);
  // NOLINT_NEXT_LINE(whitespace/operators)
  kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(num * spatial_dim),
      CAFFE_CUDA_NUM_THREADS>>>(num, channels, spatial_dim, bottom_diff,
      scale_data);
  // elementwise multiplication
  caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
}

INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);

}  // namespace caffe
2bf15b8eac46d9996b735f49829067672b4df982.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <algorithm> #include <limits> #include <vector> #include "paddle/fluid/operators/stack_op.h" #include "paddle/fluid/platform/gpu_launch_config.h" namespace plat = paddle::platform; namespace ops = paddle::operators; namespace paddle { namespace operators { template <typename T, typename IntType> __global__ void StackCUDAKernel(T** input_ptrs, int split_size, int rows, int cols, T* __restrict__ output) { IntType grid_x = blockIdx.x * blockDim.x + threadIdx.x; for (; grid_x < cols; grid_x += blockDim.x * gridDim.x) { IntType grid_y = blockIdx.y * blockDim.y + threadIdx.y; IntType split = grid_x / split_size; const T* input_ptr = input_ptrs[split]; IntType col_offset = grid_x % split_size; #pragma unroll for (; grid_y < rows; grid_y += blockDim.y * gridDim.y) { output[grid_y * cols + grid_x] = input_ptr[grid_y * split_size + col_offset]; } } } template <typename T> class StackGPUKernel : public framework::OpKernel<T> { using Tensor = framework::LoDTensor; public: void Compute(const framework::ExecutionContext& ctx) const override { auto x = ctx.MultiInput<Tensor>("X"); auto* y = ctx.Output<Tensor>("Y"); int axis = ctx.Attr<int>("axis"); if (axis < 0) axis += (x[0]->dims().size() + 1); int n = static_cast<int>(x.size()); auto* y_data = y->mutable_data<T>(ctx.GetPlace()); std::vector<const T*> x_datas(n); for (int i = 0; i < n; i++) { x_datas[i] = x[i]->data<T>(); } auto& dev_ctx = ctx.template device_context<plat::CUDADeviceContext>(); auto tmp_x_data = memory::Alloc(dev_ctx, x_datas.size() * sizeof(T*)); memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()), tmp_x_data->ptr(), platform::CPUPlace(), reinterpret_cast<void*>(x_datas.data()), x_datas.size() * sizeof(T*), dev_ctx.stream()); // Split x dim from axis to matrix int x_row = 1, x_col = 1; for (int i = 0; i < axis; ++i) { x_row *= x[0]->dims()[i]; } x_col = x[0]->numel() / x_row; int out_col = x_col * n; auto config = GetGpuLaunchConfig2D(dev_ctx, out_col, x_row); if (y->numel() < std::numeric_limits<int32_t>::max()) { hipLaunchKernelGGL(( StackCUDAKernel<T, int32_t>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, dev_ctx.stream(), reinterpret_cast<T**>(tmp_x_data->ptr()), x_col, x_row, out_col, y_data); } else { hipLaunchKernelGGL(( StackCUDAKernel<T, int64_t>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, dev_ctx.stream(), reinterpret_cast<T**>(tmp_x_data->ptr()), x_col, x_row, out_col, y_data); } } }; template <typename T, typename IntType> __global__ void UnStackCUDAKernel(const T* __restrict__ input, int pre_dim_size, int split_dim_size, int suf_dim_size, int num_split, T** output_ptrs) { assert(blockDim.y == 1); assert(blockDim.z == 1); // In this case they are equal assert(split_dim_size % num_split == 0); IntType size = pre_dim_size * split_dim_size * suf_dim_size; IntType 
each_dim_size = split_dim_size / num_split; for (IntType offset = blockIdx.x * blockDim.x + threadIdx.x; offset < size; offset += blockDim.x * gridDim.x) { IntType i = offset / (split_dim_size * suf_dim_size); IntType j = (offset % (split_dim_size * suf_dim_size)) / suf_dim_size; IntType k = offset % suf_dim_size; T* output = output_ptrs[j / each_dim_size]; IntType output_ind = i * each_dim_size * suf_dim_size + (j % each_dim_size) * suf_dim_size + k; *(output + output_ind) = input[offset]; } } template <typename T> class StackGradGPUKernel : public framework::OpKernel<T> { using Tensor = framework::LoDTensor; public: void Compute(const framework::ExecutionContext& ctx) const override { auto* dy = ctx.Input<Tensor>(framework::GradVarName("Y")); auto dx = ctx.MultiOutput<Tensor>(framework::GradVarName("X")); int axis = ctx.Attr<int>("axis"); if (axis < 0) axis += dy->dims().size(); int n = dy->dims()[axis]; PADDLE_ENFORCE_EQ(n, dx.size(), platform::errors::InvalidArgument( "Output dx size should be equal to n, but" " received n is:%d dx size is:%d.", n, dx.size())); // dx is output, so save each data address, then copy each dy into dx_data std::vector<T*> outputs(n); auto out_var_names = ctx.OutputNames(framework::GradVarName("X")); for (size_t j = 0; j < dx.size(); ++j) { if (out_var_names[j] != framework::kEmptyVarName && dx[j]->numel() != 0UL) { T* ptr = dx[j]->mutable_data<T>(ctx.GetPlace()); outputs[j] = ptr; } else { outputs[j] = nullptr; } } auto dy_data = dy->data<T>(); // each dx should have same shape int dy_pre = 1, dy_suf = 1; auto dy_dims = dy->dims(); int split_dim = n; for (int i = 0; i < axis; ++i) { dy_pre *= dy_dims[i]; } dy_suf = dy->numel() / (split_dim * dy_pre); auto& dev_ctx = ctx.template device_context<plat::CUDADeviceContext>(); auto tmp_out_data = memory::Alloc(dev_ctx, outputs.size() * sizeof(T*)); memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()), tmp_out_data->ptr(), platform::CPUPlace(), reinterpret_cast<void*>(outputs.data()), outputs.size() * sizeof(T*), dev_ctx.stream()); auto config = GetGpuLaunchConfig1D(dev_ctx, dy_pre * split_dim * dy_suf); if (dy->numel() < std::numeric_limits<int32_t>::max()) { hipLaunchKernelGGL(( UnStackCUDAKernel< T, int32_t>), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, dev_ctx.stream(), dy_data, dy_pre, split_dim, dy_suf, split_dim, reinterpret_cast<T**>(tmp_out_data->ptr())); } else { hipLaunchKernelGGL(( UnStackCUDAKernel< T, int64_t>), dim3(config.block_per_grid.x), dim3(config.thread_per_block.x), 0, dev_ctx.stream(), dy_data, dy_pre, split_dim, dy_suf, split_dim, reinterpret_cast<T**>(tmp_out_data->ptr())); } } }; } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL(stack, ops::StackGPUKernel<float>, ops::StackGPUKernel<double>, ops::StackGPUKernel<int>, ops::StackGPUKernel<int64_t>, ops::StackGPUKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL(stack_grad, ops::StackGradGPUKernel<float>, ops::StackGradGPUKernel<double>, ops::StackGradGPUKernel<int>, ops::StackGradGPUKernel<int64_t>, ops::StackGradGPUKernel<plat::float16>);
2bf15b8eac46d9996b735f49829067672b4df982.cu
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <algorithm> #include <limits> #include <vector> #include "paddle/fluid/operators/stack_op.h" #include "paddle/fluid/platform/gpu_launch_config.h" namespace plat = paddle::platform; namespace ops = paddle::operators; namespace paddle { namespace operators { template <typename T, typename IntType> __global__ void StackCUDAKernel(T** input_ptrs, int split_size, int rows, int cols, T* __restrict__ output) { IntType grid_x = blockIdx.x * blockDim.x + threadIdx.x; for (; grid_x < cols; grid_x += blockDim.x * gridDim.x) { IntType grid_y = blockIdx.y * blockDim.y + threadIdx.y; IntType split = grid_x / split_size; const T* input_ptr = input_ptrs[split]; IntType col_offset = grid_x % split_size; #pragma unroll for (; grid_y < rows; grid_y += blockDim.y * gridDim.y) { output[grid_y * cols + grid_x] = input_ptr[grid_y * split_size + col_offset]; } } } template <typename T> class StackGPUKernel : public framework::OpKernel<T> { using Tensor = framework::LoDTensor; public: void Compute(const framework::ExecutionContext& ctx) const override { auto x = ctx.MultiInput<Tensor>("X"); auto* y = ctx.Output<Tensor>("Y"); int axis = ctx.Attr<int>("axis"); if (axis < 0) axis += (x[0]->dims().size() + 1); int n = static_cast<int>(x.size()); auto* y_data = y->mutable_data<T>(ctx.GetPlace()); std::vector<const T*> x_datas(n); for (int i = 0; i < n; i++) { x_datas[i] = x[i]->data<T>(); } auto& dev_ctx = ctx.template device_context<plat::CUDADeviceContext>(); auto tmp_x_data = memory::Alloc(dev_ctx, x_datas.size() * sizeof(T*)); memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()), tmp_x_data->ptr(), platform::CPUPlace(), reinterpret_cast<void*>(x_datas.data()), x_datas.size() * sizeof(T*), dev_ctx.stream()); // Split x dim from axis to matrix int x_row = 1, x_col = 1; for (int i = 0; i < axis; ++i) { x_row *= x[0]->dims()[i]; } x_col = x[0]->numel() / x_row; int out_col = x_col * n; auto config = GetGpuLaunchConfig2D(dev_ctx, out_col, x_row); if (y->numel() < std::numeric_limits<int32_t>::max()) { StackCUDAKernel<T, int32_t><<<config.block_per_grid, config.thread_per_block, 0, dev_ctx.stream()>>>( reinterpret_cast<T**>(tmp_x_data->ptr()), x_col, x_row, out_col, y_data); } else { StackCUDAKernel<T, int64_t><<<config.block_per_grid, config.thread_per_block, 0, dev_ctx.stream()>>>( reinterpret_cast<T**>(tmp_x_data->ptr()), x_col, x_row, out_col, y_data); } } }; template <typename T, typename IntType> __global__ void UnStackCUDAKernel(const T* __restrict__ input, int pre_dim_size, int split_dim_size, int suf_dim_size, int num_split, T** output_ptrs) { assert(blockDim.y == 1); assert(blockDim.z == 1); // In this case they are equal assert(split_dim_size % num_split == 0); IntType size = pre_dim_size * split_dim_size * suf_dim_size; IntType each_dim_size = split_dim_size / num_split; for (IntType offset = blockIdx.x * blockDim.x + threadIdx.x; offset < size; offset += blockDim.x * 
gridDim.x) { IntType i = offset / (split_dim_size * suf_dim_size); IntType j = (offset % (split_dim_size * suf_dim_size)) / suf_dim_size; IntType k = offset % suf_dim_size; T* output = output_ptrs[j / each_dim_size]; IntType output_ind = i * each_dim_size * suf_dim_size + (j % each_dim_size) * suf_dim_size + k; *(output + output_ind) = input[offset]; } } template <typename T> class StackGradGPUKernel : public framework::OpKernel<T> { using Tensor = framework::LoDTensor; public: void Compute(const framework::ExecutionContext& ctx) const override { auto* dy = ctx.Input<Tensor>(framework::GradVarName("Y")); auto dx = ctx.MultiOutput<Tensor>(framework::GradVarName("X")); int axis = ctx.Attr<int>("axis"); if (axis < 0) axis += dy->dims().size(); int n = dy->dims()[axis]; PADDLE_ENFORCE_EQ(n, dx.size(), platform::errors::InvalidArgument( "Output dx size should be equal to n, but" " received n is:%d dx size is:%d.", n, dx.size())); // dx is output, so save each data address, then copy each dy into dx_data std::vector<T*> outputs(n); auto out_var_names = ctx.OutputNames(framework::GradVarName("X")); for (size_t j = 0; j < dx.size(); ++j) { if (out_var_names[j] != framework::kEmptyVarName && dx[j]->numel() != 0UL) { T* ptr = dx[j]->mutable_data<T>(ctx.GetPlace()); outputs[j] = ptr; } else { outputs[j] = nullptr; } } auto dy_data = dy->data<T>(); // each dx should have same shape int dy_pre = 1, dy_suf = 1; auto dy_dims = dy->dims(); int split_dim = n; for (int i = 0; i < axis; ++i) { dy_pre *= dy_dims[i]; } dy_suf = dy->numel() / (split_dim * dy_pre); auto& dev_ctx = ctx.template device_context<plat::CUDADeviceContext>(); auto tmp_out_data = memory::Alloc(dev_ctx, outputs.size() * sizeof(T*)); memory::Copy(BOOST_GET_CONST(platform::CUDAPlace, dev_ctx.GetPlace()), tmp_out_data->ptr(), platform::CPUPlace(), reinterpret_cast<void*>(outputs.data()), outputs.size() * sizeof(T*), dev_ctx.stream()); auto config = GetGpuLaunchConfig1D(dev_ctx, dy_pre * split_dim * dy_suf); if (dy->numel() < std::numeric_limits<int32_t>::max()) { UnStackCUDAKernel< T, int32_t><<<config.block_per_grid.x, config.thread_per_block.x, 0, dev_ctx.stream()>>>( dy_data, dy_pre, split_dim, dy_suf, split_dim, reinterpret_cast<T**>(tmp_out_data->ptr())); } else { UnStackCUDAKernel< T, int64_t><<<config.block_per_grid.x, config.thread_per_block.x, 0, dev_ctx.stream()>>>( dy_data, dy_pre, split_dim, dy_suf, split_dim, reinterpret_cast<T**>(tmp_out_data->ptr())); } } }; } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL(stack, ops::StackGPUKernel<float>, ops::StackGPUKernel<double>, ops::StackGPUKernel<int>, ops::StackGPUKernel<int64_t>, ops::StackGPUKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL(stack_grad, ops::StackGradGPUKernel<float>, ops::StackGradGPUKernel<double>, ops::StackGradGPUKernel<int>, ops::StackGradGPUKernel<int64_t>, ops::StackGradGPUKernel<plat::float16>);
c9bb596c17aaadf7c2a10cdc18891de96aa798ef.hip
// !!! This is a file automatically generated by hipify!!! // Copyright 2009, Andrew Corrigan, [email protected] // This code is from the AIAA-2009-4001 paper #include <hip/hip_runtime.h> //#include <helper_cuda.h> //#include <helper_timer.h> #include <iostream> #include <fstream> // Options #define GAMMA 1.4f #define iterations 1 #ifndef block_length #define block_length 192 #endif #define NDIM 3 #define NNB 4 #define RK 3 // 3rd order RK #define ff_mach 1.2f #define deg_angle_of_attack 0.0f #if block_length > 128 #warning "the kernels may fail too launch on some systems if the block length is too large" #endif #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) #define checkCudaErrors {\ hipError_t err;\ if((err = hipGetLastError()) != hipSuccess) {\ printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__);\ exit(0);\ }\ } // Generic functions template <typename T> T* alloc(int N) { T* t; hipMalloc((void**)&t, sizeof(T)*N); checkCudaErrors return t; } template <typename T> void dealloc(T* array) { hipFree((void*)array); checkCudaErrors } template <typename T> void copy(T* dst, T* src, int N) { hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToDevice); checkCudaErrors } template <typename T> void upload(T* dst, T* src, int N) { hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyHostToDevice); checkCudaErrors } template <typename T> void download(T* dst, T* src, int N) { hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToHost); checkCudaErrors } void dump(float* variables, int nel, int nelr) { float* h_variables = new float[nelr*NVAR]; download(h_variables, variables, nelr*NVAR); { std::ofstream file("density"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl; } { std::ofstream file("momentum"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { for(int j = 0; j != NDIM; j++) file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " "; file << std::endl; } } { std::ofstream file("density_energy"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl; } delete[] h_variables; } /* * Element-based Cell-centered FVM solver functions */ __constant__ float ff_variable[NVAR]; __constant__ float3 ff_flux_contribution_momentum_x[1]; __constant__ float3 ff_flux_contribution_momentum_y[1]; __constant__ float3 ff_flux_contribution_momentum_z[1]; __constant__ float3 ff_flux_contribution_density_energy[1]; __global__ void cuda_initialize_variables(int nelr, float* variables) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j]; } void initialize_variables(int nelr, float* variables) { dim3 Dg(nelr / block_length), Db(block_length); hipLaunchKernelGGL(( cuda_initialize_variables), dim3(Dg), dim3(Db), 0, 0, nelr, variables); checkCudaErrors } __device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; fc_momentum_y.z = velocity.y*momentum.z; 
fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = velocity.z*momentum.z + pressure; float de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; fc_density_energy.z = velocity.z*de_p; } __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __device__ inline float compute_speed_sqd(float3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd) { return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd); } __device__ inline float compute_speed_of_sound(float& density, float& pressure) { return sqrtf(float(GAMMA)*pressure/density); } __global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float density = variables[i + VAR_DENSITY*nelr]; float3 momentum; momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity; compute_velocity(density, momentum, velocity); float speed_sqd = compute_speed_sqd(velocity); float pressure = compute_pressure(density, density_energy, speed_sqd); float speed_of_sound = compute_speed_of_sound(density, pressure); // dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... but when we do time stepping, this later would need to be divided by the area, so we just do it all at once step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound)); } void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { dim3 Dg(nelr / block_length), Db(block_length); hipLaunchKernelGGL(( cuda_compute_step_factor), dim3(Dg), dim3(Db), 0, 0, nelr, variables, areas, step_factors); checkCudaErrors } __global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; float density_i = variables[i + VAR_DENSITY*nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); float flux_i_density = float(0.0f); float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); 
flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < NNB; j++) { nb = elements_surrounding_elements[i + j*nelr]; normal.x = normals[i + (j + 0*NNB)*nelr]; normal.y = normals[i + (j + 1*NNB)*nelr]; normal.z = normals[i + (j + 2*NNB)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { density_nb = variables[nb + VAR_DENSITY*nelr]; momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } else if(nb == -1) // a wing boundary { flux_i_momentum.x += normal.x*pressure_i; flux_i_momentum.y += normal.y*pressure_i; flux_i_momentum.z += 
normal.z*pressure_i; } else if(nb == -2) // a far field boundary { factor = float(0.5f)*normal.x; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z); } } fluxes[i + VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; } void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes) { dim3 Dg(nelr/block_length), Db(block_length); // printf("compute_flux: numBlocks=%d, numThreads=%d\n", nelr/block_length, block_length); hipLaunchKernelGGL(( cuda_compute_flux), dim3(Dg),dim3(Db), 0, 0, nelr, elements_surrounding_elements, normals, variables, fluxes); checkCudaErrors } __global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float factor = step_factors[i]/float(RK+1-j); variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr]; variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr]; variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr]; variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr]; variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr]; } void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { dim3 Dg(nelr/block_length), Db(block_length); // printf("time_step: numBlocks=%d, numThreads=%d\n", nelr/block_length, block_length); hipLaunchKernelGGL(( cuda_time_step), dim3(Dg),dim3(Db), 0, 0, j, nelr, old_variables, variables, step_factors, fluxes); checkCudaErrors } // Main function 
int main(int argc, char** argv) { if (argc < 2) { std::cout << "specify data file name" << std::endl; return 0; } const char* data_file_name = argv[1]; /* hipDeviceProp_t prop; int dev; checkCudaErrors(hipSetDevice(0)); checkCudaErrors(hipGetDevice(&dev)); checkCudaErrors(hipGetDeviceProperties(&prop, dev)); printf("Name: %s\n", prop.name); */ // set far field conditions and load them into constant memory on the gpu { float h_ff_variable[NVAR]; const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack); h_ff_variable[VAR_DENSITY] = float(1.4); float ff_pressure = float(1.0f); float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]); float ff_speed = float(ff_mach)*ff_speed_of_sound; float3 ff_velocity; ff_velocity.x = ff_speed*float(cos((float)angle_of_attack)); ff_velocity.y = ff_speed*float(sin((float)angle_of_attack)); ff_velocity.z = 0.0f; h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x; h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y; h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z; h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f)); float3 h_ff_momentum; h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0); h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1); h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2); float3 h_ff_flux_contribution_momentum_x; float3 h_ff_flux_contribution_momentum_y; float3 h_ff_flux_contribution_momentum_z; float3 h_ff_flux_contribution_density_energy; compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy); // copy far field conditions to the gpu hipMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)); hipMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3)); hipMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3)); hipMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3)); hipMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3)); checkCudaErrors } int nel; int nelr; // read in domain geometry float* areas; int* elements_surrounding_elements; float* normals; { std::ifstream file(data_file_name); file >> nel; nelr = block_length*((nel / block_length )+ ::min(1, nel % block_length)); float* h_areas = new float[nelr]; int* h_elements_surrounding_elements = new int[nelr*NNB]; float* h_normals = new float[nelr*NDIM*NNB]; // read in data for(int i = 0; i < nel; i++) { file >> h_areas[i]; for(int j = 0; j < NNB; j++) { file >> h_elements_surrounding_elements[i + j*nelr]; if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1; h_elements_surrounding_elements[i + j*nelr]--; for(int k = 0; k < NDIM; k++) { file >> h_normals[i + (j + k*NNB)*nelr]; h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr]; } } } // fill in remaining data int last = nel-1; for(int i = nel; i < nelr; i++) { h_areas[i] = h_areas[last]; for(int j = 0; j < NNB; j++) { // duplicate the last element h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr]; for(int k = 0; k < NDIM; k++) 
h_normals[last + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr]; } } areas = alloc<float>(nelr); upload<float>(areas, h_areas, nelr); elements_surrounding_elements = alloc<int>(nelr*NNB); upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB); normals = alloc<float>(nelr*NDIM*NNB); upload<float>(normals, h_normals, nelr*NDIM*NNB); delete[] h_areas; delete[] h_elements_surrounding_elements; delete[] h_normals; } // Create arrays and set initial conditions float* variables = alloc<float>(nelr*NVAR); initialize_variables(nelr, variables); float* old_variables = alloc<float>(nelr*NVAR); float* fluxes = alloc<float>(nelr*NVAR); float* step_factors = alloc<float>(nelr); // make sure all memory is floatly allocated before we start timing initialize_variables(nelr, old_variables); initialize_variables(nelr, fluxes); hipMemset( (void*) step_factors, 0, sizeof(float)*nelr ); // make sure CUDA isn't still doing something before we start timing hipDeviceSynchronize(); // these need to be computed the first time in order to compute time step std::cout << "[BENCH] Starting..." << std::endl; // StopWatchInterface *timer = 0; // sdkCreateTimer(&timer); // sdkStartTimer(&timer); // Begin iterations for(int i = 0; i < iterations; i++) { copy<float>(old_variables, variables, nelr*NVAR); // for the first iteration we compute the time step compute_step_factor(nelr, variables, areas, step_factors); checkCudaErrors for(int j = 0; j < RK; j++) { compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes); checkCudaErrors time_step(j, nelr, old_variables, variables, step_factors, fluxes); checkCudaErrors } } hipDeviceSynchronize(); // sdkStopTimer(&timer); // std::cout << (sdkGetAverageTimerValue(&timer)/1000.0) / iterations << " seconds per iteration" << std::endl; std::cout << "[BENCH] Saving solution..." << std::endl; dump(variables, nel, nelr); // std::cout << "Saved solution..." << std::endl; // std::cout << "Cleaning up..." << std::endl; dealloc<float>(areas); dealloc<int>(elements_surrounding_elements); dealloc<float>(normals); dealloc<float>(variables); dealloc<float>(old_variables); dealloc<float>(fluxes); dealloc<float>(step_factors); std::cout << "[BENCH] Done..." << std::endl; return 0; }
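The cuda_compute_flux kernel in this entry accumulates, for each of the NNB faces of a cell, an artificial-viscosity term scaled by the face normal length plus a central average of the cell's and its neighbor's flux contributions. The sketch below isolates the density component of one interior-face update; the helper name face_density_flux and the standalone include are illustrative additions, not part of the dataset entry, while the variable names mirror the kernel.

#include <cuda_runtime.h>   // float3 when compiled standalone

// Density flux contributed by one interior face (illustrative helper).
__host__ __device__ inline float face_density_flux(
    float3 normal, float normal_len, float smoothing_coefficient,
    float density_i, float3 momentum_i, float speed_i, float speed_of_sound_i,
    float density_nb, float3 momentum_nb, float speed_nb, float speed_of_sound_nb)
{
    // artificial viscosity: proportional to the face size and to the sum of the
    // flow speeds and sound speeds on both sides (the kernel uses 0.2f)
    float factor = -normal_len * smoothing_coefficient * 0.5f *
                   (speed_i + speed_nb + speed_of_sound_i + speed_of_sound_nb);
    float flux = factor * (density_i - density_nb);

    // central average of the Euler density flux, which is just the momentum
    flux += 0.5f * normal.x * (momentum_nb.x + momentum_i.x);
    flux += 0.5f * normal.y * (momentum_nb.y + momentum_i.y);
    flux += 0.5f * normal.z * (momentum_nb.z + momentum_i.z);
    return flux;
}

The momentum and energy components follow the same per-face pattern, with compute_flux_contribution supplying the directional fluxes that get averaged.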
c9bb596c17aaadf7c2a10cdc18891de96aa798ef.cu
// Copyright 2009, Andrew Corrigan, [email protected] // This code is from the AIAA-2009-4001 paper #include <cuda.h> //#include <helper_cuda.h> //#include <helper_timer.h> #include <iostream> #include <fstream> // Options #define GAMMA 1.4f #define iterations 1 #ifndef block_length #define block_length 192 #endif #define NDIM 3 #define NNB 4 #define RK 3 // 3rd order RK #define ff_mach 1.2f #define deg_angle_of_attack 0.0f #if block_length > 128 #warning "the kernels may fail too launch on some systems if the block length is too large" #endif #define VAR_DENSITY 0 #define VAR_MOMENTUM 1 #define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM) #define NVAR (VAR_DENSITY_ENERGY+1) #define checkCudaErrors {\ cudaError_t err;\ if((err = cudaGetLastError()) != cudaSuccess) {\ printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__);\ exit(0);\ }\ } // Generic functions template <typename T> T* alloc(int N) { T* t; cudaMalloc((void**)&t, sizeof(T)*N); checkCudaErrors return t; } template <typename T> void dealloc(T* array) { cudaFree((void*)array); checkCudaErrors } template <typename T> void copy(T* dst, T* src, int N) { cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToDevice); checkCudaErrors } template <typename T> void upload(T* dst, T* src, int N) { cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyHostToDevice); checkCudaErrors } template <typename T> void download(T* dst, T* src, int N) { cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToHost); checkCudaErrors } void dump(float* variables, int nel, int nelr) { float* h_variables = new float[nelr*NVAR]; download(h_variables, variables, nelr*NVAR); { std::ofstream file("density"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl; } { std::ofstream file("momentum"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) { for(int j = 0; j != NDIM; j++) file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " "; file << std::endl; } } { std::ofstream file("density_energy"); file << nel << " " << nelr << std::endl; for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl; } delete[] h_variables; } /* * Element-based Cell-centered FVM solver functions */ __constant__ float ff_variable[NVAR]; __constant__ float3 ff_flux_contribution_momentum_x[1]; __constant__ float3 ff_flux_contribution_momentum_y[1]; __constant__ float3 ff_flux_contribution_momentum_z[1]; __constant__ float3 ff_flux_contribution_density_energy[1]; __global__ void cuda_initialize_variables(int nelr, float* variables) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); for(int j = 0; j < NVAR; j++) variables[i + j*nelr] = ff_variable[j]; } void initialize_variables(int nelr, float* variables) { dim3 Dg(nelr / block_length), Db(block_length); cuda_initialize_variables<<<Dg, Db>>>(nelr, variables); checkCudaErrors } __device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy) { fc_momentum_x.x = velocity.x*momentum.x + pressure; fc_momentum_x.y = velocity.x*momentum.y; fc_momentum_x.z = velocity.x*momentum.z; fc_momentum_y.x = fc_momentum_x.y; fc_momentum_y.y = velocity.y*momentum.y + pressure; fc_momentum_y.z = velocity.y*momentum.z; fc_momentum_z.x = fc_momentum_x.z; fc_momentum_z.y = fc_momentum_y.z; fc_momentum_z.z = 
velocity.z*momentum.z + pressure; float de_p = density_energy+pressure; fc_density_energy.x = velocity.x*de_p; fc_density_energy.y = velocity.y*de_p; fc_density_energy.z = velocity.z*de_p; } __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity) { velocity.x = momentum.x / density; velocity.y = momentum.y / density; velocity.z = momentum.z / density; } __device__ inline float compute_speed_sqd(float3& velocity) { return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z; } __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd) { return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd); } __device__ inline float compute_speed_of_sound(float& density, float& pressure) { return sqrtf(float(GAMMA)*pressure/density); } __global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float density = variables[i + VAR_DENSITY*nelr]; float3 momentum; momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity; compute_velocity(density, momentum, velocity); float speed_sqd = compute_speed_sqd(velocity); float pressure = compute_pressure(density, density_energy, speed_sqd); float speed_of_sound = compute_speed_of_sound(density, pressure); // dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c).... but when we do time stepping, this later would need to be divided by the area, so we just do it all at once step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound)); } void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors) { dim3 Dg(nelr / block_length), Db(block_length); cuda_compute_step_factor<<<Dg, Db>>>(nelr, variables, areas, step_factors); checkCudaErrors } __global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes) { const float smoothing_coefficient = float(0.2f); const int i = (blockDim.x*blockIdx.x + threadIdx.x); int j, nb; float3 normal; float normal_len; float factor; float density_i = variables[i + VAR_DENSITY*nelr]; float3 momentum_i; momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr]; momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr]; momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr]; float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr]; float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i); float speed_sqd_i = compute_speed_sqd(velocity_i); float speed_i = sqrtf(speed_sqd_i); float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i); float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i); float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z; float3 flux_contribution_i_density_energy; compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy); float flux_i_density = float(0.0f); float3 flux_i_momentum; flux_i_momentum.x = float(0.0f); flux_i_momentum.y = float(0.0f); flux_i_momentum.z = float(0.0f); float flux_i_density_energy = float(0.0f); float3 velocity_nb; float density_nb, density_energy_nb; 
float3 momentum_nb; float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z; float3 flux_contribution_nb_density_energy; float speed_sqd_nb, speed_of_sound_nb, pressure_nb; #pragma unroll for(j = 0; j < NNB; j++) { nb = elements_surrounding_elements[i + j*nelr]; normal.x = normals[i + (j + 0*NNB)*nelr]; normal.y = normals[i + (j + 1*NNB)*nelr]; normal.z = normals[i + (j + 2*NNB)*nelr]; normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z); if(nb >= 0) // a legitimate neighbor { density_nb = variables[nb + VAR_DENSITY*nelr]; momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr]; momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr]; momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr]; density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr]; compute_velocity(density_nb, momentum_nb, velocity_nb); speed_sqd_nb = compute_speed_sqd(velocity_nb); pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb); speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb); compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy); // artificial viscosity factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb); flux_i_density += factor*(density_i-density_nb); flux_i_density_energy += factor*(density_energy_i-density_energy_nb); flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x); flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y); flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z); // accumulate cell-centered fluxes factor = float(0.5f)*normal.x; flux_i_density += factor*(momentum_nb.x+momentum_i.x); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(momentum_nb.y+momentum_i.y); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(momentum_nb.z+momentum_i.z); flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z); } else if(nb == -1) // a wing boundary { flux_i_momentum.x += normal.x*pressure_i; flux_i_momentum.y += normal.y*pressure_i; flux_i_momentum.z += normal.z*pressure_i; } else if(nb == -2) // a far field boundary { factor = float(0.5f)*normal.x; flux_i_density += 
factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x); factor = float(0.5f)*normal.y; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y); factor = float(0.5f)*normal.z; flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z); flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z); flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z); flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z); flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z); } } fluxes[i + VAR_DENSITY*nelr] = flux_i_density; fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x; fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y; fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z; fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy; } void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes) { dim3 Dg(nelr/block_length), Db(block_length); // printf("compute_flux: numBlocks=%d, numThreads=%d\n", nelr/block_length, block_length); cuda_compute_flux<<<Dg,Db>>>(nelr, elements_surrounding_elements, normals, variables, fluxes); checkCudaErrors } __global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { const int i = (blockDim.x*blockIdx.x + threadIdx.x); float factor = step_factors[i]/float(RK+1-j); variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr]; variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr]; variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr]; variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr]; variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr]; } void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes) { dim3 Dg(nelr/block_length), Db(block_length); // printf("time_step: numBlocks=%d, numThreads=%d\n", nelr/block_length, block_length); cuda_time_step<<<Dg,Db>>>(j, nelr, old_variables, variables, step_factors, fluxes); checkCudaErrors } // Main function int main(int argc, char** argv) { if (argc < 2) { std::cout << "specify data file name" << std::endl; return 0; } const char* data_file_name = argv[1]; /* cudaDeviceProp prop; int dev; 
checkCudaErrors(cudaSetDevice(0)); checkCudaErrors(cudaGetDevice(&dev)); checkCudaErrors(cudaGetDeviceProperties(&prop, dev)); printf("Name: %s\n", prop.name); */ // set far field conditions and load them into constant memory on the gpu { float h_ff_variable[NVAR]; const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack); h_ff_variable[VAR_DENSITY] = float(1.4); float ff_pressure = float(1.0f); float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]); float ff_speed = float(ff_mach)*ff_speed_of_sound; float3 ff_velocity; ff_velocity.x = ff_speed*float(cos((float)angle_of_attack)); ff_velocity.y = ff_speed*float(sin((float)angle_of_attack)); ff_velocity.z = 0.0f; h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x; h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y; h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z; h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f)); float3 h_ff_momentum; h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0); h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1); h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2); float3 h_ff_flux_contribution_momentum_x; float3 h_ff_flux_contribution_momentum_y; float3 h_ff_flux_contribution_momentum_z; float3 h_ff_flux_contribution_density_energy; compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy); // copy far field conditions to the gpu cudaMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)); cudaMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3)); cudaMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3)); cudaMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3)); cudaMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3)); checkCudaErrors } int nel; int nelr; // read in domain geometry float* areas; int* elements_surrounding_elements; float* normals; { std::ifstream file(data_file_name); file >> nel; nelr = block_length*((nel / block_length )+ std::min(1, nel % block_length)); float* h_areas = new float[nelr]; int* h_elements_surrounding_elements = new int[nelr*NNB]; float* h_normals = new float[nelr*NDIM*NNB]; // read in data for(int i = 0; i < nel; i++) { file >> h_areas[i]; for(int j = 0; j < NNB; j++) { file >> h_elements_surrounding_elements[i + j*nelr]; if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1; h_elements_surrounding_elements[i + j*nelr]--; for(int k = 0; k < NDIM; k++) { file >> h_normals[i + (j + k*NNB)*nelr]; h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr]; } } } // fill in remaining data int last = nel-1; for(int i = nel; i < nelr; i++) { h_areas[i] = h_areas[last]; for(int j = 0; j < NNB; j++) { // duplicate the last element h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr]; for(int k = 0; k < NDIM; k++) h_normals[last + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr]; } } areas = alloc<float>(nelr); upload<float>(areas, h_areas, nelr); elements_surrounding_elements = 
alloc<int>(nelr*NNB); upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB); normals = alloc<float>(nelr*NDIM*NNB); upload<float>(normals, h_normals, nelr*NDIM*NNB); delete[] h_areas; delete[] h_elements_surrounding_elements; delete[] h_normals; } // Create arrays and set initial conditions float* variables = alloc<float>(nelr*NVAR); initialize_variables(nelr, variables); float* old_variables = alloc<float>(nelr*NVAR); float* fluxes = alloc<float>(nelr*NVAR); float* step_factors = alloc<float>(nelr); // make sure all memory is floatly allocated before we start timing initialize_variables(nelr, old_variables); initialize_variables(nelr, fluxes); cudaMemset( (void*) step_factors, 0, sizeof(float)*nelr ); // make sure CUDA isn't still doing something before we start timing cudaThreadSynchronize(); // these need to be computed the first time in order to compute time step std::cout << "[BENCH] Starting..." << std::endl; // StopWatchInterface *timer = 0; // sdkCreateTimer(&timer); // sdkStartTimer(&timer); // Begin iterations for(int i = 0; i < iterations; i++) { copy<float>(old_variables, variables, nelr*NVAR); // for the first iteration we compute the time step compute_step_factor(nelr, variables, areas, step_factors); checkCudaErrors for(int j = 0; j < RK; j++) { compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes); checkCudaErrors time_step(j, nelr, old_variables, variables, step_factors, fluxes); checkCudaErrors } } cudaThreadSynchronize(); // sdkStopTimer(&timer); // std::cout << (sdkGetAverageTimerValue(&timer)/1000.0) / iterations << " seconds per iteration" << std::endl; std::cout << "[BENCH] Saving solution..." << std::endl; dump(variables, nel, nelr); // std::cout << "Saved solution..." << std::endl; // std::cout << "Cleaning up..." << std::endl; dealloc<float>(areas); dealloc<int>(elements_surrounding_elements); dealloc<float>(normals); dealloc<float>(variables); dealloc<float>(old_variables); dealloc<float>(fluxes); dealloc<float>(step_factors); std::cout << "[BENCH] Done..." << std::endl; return 0; }
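Set against the hip_content of the same entry, this cuda_content shows the mechanical rewrites hipify applied to the solver: each triple-chevron launch becomes a hipLaunchKernelGGL call, cudaMemcpyToSymbol, cudaMemset and cudaThreadSynchronize become hipMemcpyToSymbol, hipMemset and hipDeviceSynchronize, and std::min becomes ::min. A minimal sketch of the launch-site rewrite, using an illustrative kernel that is not part of the benchmark:

#include <cuda_runtime.h>

// Illustrative kernel, not part of the benchmark.
__global__ void scale(int n, float a, float* x)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

void launch_scale(int n, float a, float* d_x)
{
    const int block_length = 192;                 // same block size the benchmark uses
    dim3 Dg((n + block_length - 1) / block_length), Db(block_length);
    scale<<<Dg, Db>>>(n, a, d_x);                 // CUDA form, as in cuda_content
    // hipify rewrites the launch above to the hip_content form:
    //   hipLaunchKernelGGL(scale, dim3(Dg), dim3(Db), 0, 0, n, a, d_x);
    // where the two extra arguments are the dynamic shared-memory size and the stream.
    cudaDeviceSynchronize();                      // cudaThreadSynchronize in this file,
                                                  // hipDeviceSynchronize after hipify
}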
b874e824d2a62e8947639a230ba7fdbdc86b265f.hip
// !!! This is a file automatically generated by hipify!!! #include <limits> #include <math.h> #include <stdint.h> #include "bfs_loader.h" #include "executor/cuda_executor.h" #include "executor/executor.h" #include "soa/soa.h" static const int kMaxDegree = 10; static const int kMaxVertices = 20000; using ikra::soa::IndexType; using ikra::soa::SoaLayout; using ikra::executor::execute; class Vertex : public SoaLayout<Vertex, kMaxVertices> { public: IKRA_INITIALIZE_CLASS Vertex(const std::vector<IndexType>& neighbors) { // If this check fails, the dataset cannot be run with this // implementation. assert(neighbors.size() <= kMaxDegree); adj_list_size_ = neighbors.size(); for (int i = 0; i < num_neighbors(); ++i) { Vertex* vertex = Vertex::get_uninitialized(neighbors[i]); adj_list_[i] = vertex; } } __host__ __device__ int num_neighbors() { return adj_list_size_; } // Visit the vertex, i.e., update the distances of all neighbors if this // vertex is in the frontier, as indicated by the "iteration" field. Returns // "true" if at least one neighbor was updated. __device__ bool visit(int iteration) { bool updated = false; if (distance_ == iteration) { for (int i = 0; i < num_neighbors(); ++i) { Vertex* neighbor = adj_list_[i]; updated |= neighbor->update_distance(distance_ + 1); } } return updated; } void print_distance() { printf("distance[%lu] = %i\n", id(), (int) distance_); } void set_distance(int value) { distance_ = value; } __device__ bool update_distance(int distance) { if (distance < distance_) { distance_ = distance; return true; } else { return false; } } int_ distance_ = std::numeric_limits<int>::max(); int_ adj_list_size_; // A fully inlined SOA array. array_(Vertex*, kMaxDegree, fully_inlined) adj_list_; }; IKRA_DEVICE_STORAGE(Vertex) int run() { int iteration = 0; bool running = true; while (running) { auto reducer = [](bool a, bool b) { return a || b; }; running = cuda_execute_and_reduce(&Vertex::visit, reducer, iteration); ++iteration; } return iteration; } int main(int argc, char* argv[]) { // Load vertices from file. if (argc != 4) { printf("Usage: %s filename num_vertices start_vertex\n", argv[0]); exit(1); } Vertex::initialize_storage(); load_file<Vertex>(argv[1], atoi(argv[2])); // Set start vertex. Vertex* start_vertex = Vertex::get(atoi(argv[3])); start_vertex->set_distance(0); // Start algorithm. int iterations = run(); // Note: execute is host side, cuda_execute is device side. printf("Iterations: %i\n", iterations); execute(&Vertex::print_distance); // Ensure nothing went wrong on the GPU. gpuErrchk(hipPeekAtLastError()); }
b874e824d2a62e8947639a230ba7fdbdc86b265f.cu
#include <limits> #include <math.h> #include <stdint.h> #include "bfs_loader.h" #include "executor/cuda_executor.h" #include "executor/executor.h" #include "soa/soa.h" static const int kMaxDegree = 10; static const int kMaxVertices = 20000; using ikra::soa::IndexType; using ikra::soa::SoaLayout; using ikra::executor::execute; class Vertex : public SoaLayout<Vertex, kMaxVertices> { public: IKRA_INITIALIZE_CLASS Vertex(const std::vector<IndexType>& neighbors) { // If this check fails, the dataset cannot be run with this // implementation. assert(neighbors.size() <= kMaxDegree); adj_list_size_ = neighbors.size(); for (int i = 0; i < num_neighbors(); ++i) { Vertex* vertex = Vertex::get_uninitialized(neighbors[i]); adj_list_[i] = vertex; } } __host__ __device__ int num_neighbors() { return adj_list_size_; } // Visit the vertex, i.e., update the distances of all neighbors if this // vertex is in the frontier, as indicated by the "iteration" field. Returns // "true" if at least one neighbor was updated. __device__ bool visit(int iteration) { bool updated = false; if (distance_ == iteration) { for (int i = 0; i < num_neighbors(); ++i) { Vertex* neighbor = adj_list_[i]; updated |= neighbor->update_distance(distance_ + 1); } } return updated; } void print_distance() { printf("distance[%lu] = %i\n", id(), (int) distance_); } void set_distance(int value) { distance_ = value; } __device__ bool update_distance(int distance) { if (distance < distance_) { distance_ = distance; return true; } else { return false; } } int_ distance_ = std::numeric_limits<int>::max(); int_ adj_list_size_; // A fully inlined SOA array. array_(Vertex*, kMaxDegree, fully_inlined) adj_list_; }; IKRA_DEVICE_STORAGE(Vertex) int run() { int iteration = 0; bool running = true; while (running) { auto reducer = [](bool a, bool b) { return a || b; }; running = cuda_execute_and_reduce(&Vertex::visit, reducer, iteration); ++iteration; } return iteration; } int main(int argc, char* argv[]) { // Load vertices from file. if (argc != 4) { printf("Usage: %s filename num_vertices start_vertex\n", argv[0]); exit(1); } Vertex::initialize_storage(); load_file<Vertex>(argv[1], atoi(argv[2])); // Set start vertex. Vertex* start_vertex = Vertex::get(atoi(argv[3])); start_vertex->set_distance(0); // Start algorithm. int iterations = run(); // Note: execute is host side, cuda_execute is device side. printf("Iterations: %i\n", iterations); execute(&Vertex::print_distance); // Ensure nothing went wrong on the GPU. gpuErrchk(cudaPeekAtLastError()); }
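Apart from the hipify banner comment, the only difference between the two files of this pair is the final check: gpuErrchk(cudaPeekAtLastError()) becomes gpuErrchk(hipPeekAtLastError()). gpuErrchk itself is not defined in either file and presumably comes from one of the included executor headers; the definition below is only an assumed, conventional one, following the same pattern as the gpuAssert helper that the equihash entry later in this file defines for checkCudaErrors.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Assumed definition of gpuErrchk; the project's real definition may differ.
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }

// Usage, as at the end of main() above:
//   gpuErrchk(cudaPeekAtLastError());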
56df80bf46cd26f9e7a550831bee019843390baa.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<32>; using LayoutFilter = cutlass::layout::TensorCxRSKx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< int8_t, 8, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t, LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 16, 16, false>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
56df80bf46cd26f9e7a550831bee019843390baa.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<32>; using LayoutFilter = cutlass::layout::TensorCxRSKx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 64>; using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>; using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< int8_t, 8, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutSrc, int32_t, LayoutSrc, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 16, 16, false>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const int8_t* d_src, const int8_t* d_filter, const int32_t* d_bias, const int8_t* d_z, int8_t* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
17995f699b2d00debf48e5b1de7c412b477a9b64.hip
// !!! This is a file automatically generated by hipify!!! // Utilities and system includes #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #define DATA_TYPE 1 // 0-SP, 1-INT, 2-DP #define SIZE 60000000 #define TILE_DIM 1024 #define INNER_REPS 16 template <class T> __global__ void simpleKernel(T *A, T *C1, T *C2, T *C3, T *C4) { int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; T ra, rb, rc, rd; if (xIndex < SIZE) { ra=A[xIndex]; rb=A[SIZE-xIndex]; rc=A[xIndex]; rd=A[SIZE-xIndex]; // rb=A[xIndex]; #pragma unroll 16 for (int i=0;i<INNER_REPS;i++) { ra=ra*rc+rb; rb=rb*rd+rc; rc=rc*ra+rd; rd=rd*rb+ra; } C1[xIndex]=ra; C2[xIndex]=rb; C3[xIndex]=rc; C4[xIndex]=rd; } } int main(int argc, char **argv) { int outer_reps, vector_size, tile_dim; vector_size = SIZE; tile_dim = TILE_DIM; if (argc>1){ outer_reps = atoi(argv[1]); }else{ outer_reps = 1; } // execution configuration parameters dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1); // CUDA events hipEvent_t start, stop; size_t mem_size = static_cast<size_t>(sizeof(int) * vector_size); // allocate host memory int *h_iA = (int *) malloc(mem_size); int *h_oC1 = (int *) malloc(mem_size); int *h_oC2 = (int *) malloc(mem_size); int *h_oC3 = (int *) malloc(mem_size); int *h_oC4 = (int *) malloc(mem_size); // initalize host data for (int i = 0; i < vector_size; ++i) { h_iA[i] = (int) i+3; // h_iB[i] = (float) i+3; } // allocate device memory int *d_iA, *d_iB, *d_oC1, *d_oC2, *d_oC3, *d_oC4; hipMalloc((void **) &d_iA, mem_size); // hipMalloc((void **) &d_iB, mem_size); hipMalloc((void **) &d_oC1, mem_size); hipMalloc((void **) &d_oC2, mem_size); hipMalloc((void **) &d_oC3, mem_size); hipMalloc((void **) &d_oC4, mem_size); // copy host data to device hipMemcpy(d_iA, h_iA, mem_size, hipMemcpyHostToDevice); // hipMemcpy(d_iB, h_iB, mem_size, hipMemcpyHostToDevice); // print out common data for all kernels printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x); // initialize events hipEventCreate(&start); hipEventCreate(&stop); // take measurements for loop over kernel launches hipEventRecord(start, 0); for (int i=0; i < outer_reps; i++) { hipLaunchKernelGGL(( simpleKernel<int>), dim3(grid), dim3(threads), 0, 0, d_iA, d_oC1, d_oC2, d_oC3, d_oC4); } hipEventRecord(stop, 0); hipEventSynchronize(stop); float kernelTime; hipEventElapsedTime(&kernelTime, start, stop); // take measurements for loop inside kernel hipMemcpy(h_oC1, d_oC1, mem_size, hipMemcpyDeviceToHost); hipMemcpy(h_oC2, d_oC2, mem_size, hipMemcpyDeviceToHost); hipMemcpy(h_oC3, d_oC3, mem_size, hipMemcpyDeviceToHost); hipMemcpy(h_oC4, d_oC4, mem_size, hipMemcpyDeviceToHost); printf("teste: %f\n", h_oC1[0]); // report effective bandwidths float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/outer_reps); printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n", kernelBandwidth, kernelTime/outer_reps, vector_size, 1, tile_dim * 1); free(h_iA); // free(h_iB); free(h_oC1); free(h_oC2); free(h_oC3); free(h_oC4); hipFree(d_iA); // hipFree(d_iB); hipFree(d_oC1); hipFree(d_oC2); hipFree(d_oC3); hipFree(d_oC4); hipEventDestroy(start); hipEventDestroy(stop); hipDeviceReset(); printf("Test passed\n"); exit(EXIT_SUCCESS); }
17995f699b2d00debf48e5b1de7c412b477a9b64.cu
// Utilities and system includes #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_profiler_api.h> #define DATA_TYPE 1 // 0-SP, 1-INT, 2-DP #define SIZE 60000000 #define TILE_DIM 1024 #define INNER_REPS 16 template <class T> __global__ void simpleKernel(T *A, T *C1, T *C2, T *C3, T *C4) { int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; T ra, rb, rc, rd; if (xIndex < SIZE) { ra=A[xIndex]; rb=A[SIZE-xIndex]; rc=A[xIndex]; rd=A[SIZE-xIndex]; // rb=A[xIndex]; #pragma unroll 16 for (int i=0;i<INNER_REPS;i++) { ra=ra*rc+rb; rb=rb*rd+rc; rc=rc*ra+rd; rd=rd*rb+ra; } C1[xIndex]=ra; C2[xIndex]=rb; C3[xIndex]=rc; C4[xIndex]=rd; } } int main(int argc, char **argv) { int outer_reps, vector_size, tile_dim; vector_size = SIZE; tile_dim = TILE_DIM; if (argc>1){ outer_reps = atoi(argv[1]); }else{ outer_reps = 1; } // execution configuration parameters dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1); // CUDA events cudaEvent_t start, stop; size_t mem_size = static_cast<size_t>(sizeof(int) * vector_size); // allocate host memory int *h_iA = (int *) malloc(mem_size); int *h_oC1 = (int *) malloc(mem_size); int *h_oC2 = (int *) malloc(mem_size); int *h_oC3 = (int *) malloc(mem_size); int *h_oC4 = (int *) malloc(mem_size); // initalize host data for (int i = 0; i < vector_size; ++i) { h_iA[i] = (int) i+3; // h_iB[i] = (float) i+3; } // allocate device memory int *d_iA, *d_iB, *d_oC1, *d_oC2, *d_oC3, *d_oC4; cudaMalloc((void **) &d_iA, mem_size); // cudaMalloc((void **) &d_iB, mem_size); cudaMalloc((void **) &d_oC1, mem_size); cudaMalloc((void **) &d_oC2, mem_size); cudaMalloc((void **) &d_oC3, mem_size); cudaMalloc((void **) &d_oC4, mem_size); // copy host data to device cudaMemcpy(d_iA, h_iA, mem_size, cudaMemcpyHostToDevice); // cudaMemcpy(d_iB, h_iB, mem_size, cudaMemcpyHostToDevice); // print out common data for all kernels printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x); // initialize events cudaEventCreate(&start); cudaEventCreate(&stop); // take measurements for loop over kernel launches cudaEventRecord(start, 0); for (int i=0; i < outer_reps; i++) { simpleKernel<int><<<grid, threads>>>(d_iA, d_oC1, d_oC2, d_oC3, d_oC4); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float kernelTime; cudaEventElapsedTime(&kernelTime, start, stop); // take measurements for loop inside kernel cudaMemcpy(h_oC1, d_oC1, mem_size, cudaMemcpyDeviceToHost); cudaMemcpy(h_oC2, d_oC2, mem_size, cudaMemcpyDeviceToHost); cudaMemcpy(h_oC3, d_oC3, mem_size, cudaMemcpyDeviceToHost); cudaMemcpy(h_oC4, d_oC4, mem_size, cudaMemcpyDeviceToHost); printf("teste: %f\n", h_oC1[0]); // report effective bandwidths float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/outer_reps); printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n", kernelBandwidth, kernelTime/outer_reps, vector_size, 1, tile_dim * 1); free(h_iA); // free(h_iB); free(h_oC1); free(h_oC2); free(h_oC3); free(h_oC4); cudaFree(d_iA); // cudaFree(d_iB); cudaFree(d_oC1); cudaFree(d_oC2); cudaFree(d_oC3); cudaFree(d_oC4); cudaEventDestroy(start); cudaEventDestroy(stop); cudaDeviceReset(); printf("Test passed\n"); exit(EXIT_SUCCESS); }
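This pair exercises the event API mapping (cudaEvent_t, cudaEventCreate, cudaEventRecord, cudaEventSynchronize and cudaEventElapsedTime become their hip* counterparts) around a simple bandwidth measurement. A stripped-down sketch of that timing pattern, with a placeholder kernel and problem size that are not the benchmark's:

#include <cstdio>
#include <cuda_runtime.h>

// Placeholder kernel: one write per element.
__global__ void touch(int n, int* c)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) c[i] = i;
}

int main()
{
    const int n = 1 << 20;
    int* d_c = nullptr;
    cudaMalloc(&d_c, n * sizeof(int));

    cudaEvent_t start, stop;                  // hipEvent_t in the hip_content version
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    touch<<<(n + 255) / 256, 256>>>(n, d_c);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);               // wait so the elapsed time is valid

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    // bytes written divided by elapsed time; the benchmark above additionally
    // multiplies by 2.0f and divides the time by the number of outer repetitions
    printf("%.3f ms, %.3f GB/s\n", ms, (n * sizeof(int)) / (ms * 1.0e6));

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_c);
    return 0;
}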
8b987972293a16c31284ddfd48389027dcb834a8.hip
// !!! This is a file automatically generated by hipify!!! #include "polygon.cuh" void SimPolygon::resetPoly() { poly.clear(); finished = false; } glm::vec3 SimPolygon::getPoint(int idx) { return poly[idx]; } std::vector<glm::vec3> SimPolygon::getVector() { return poly; } void SimPolygon::deleteData() { hipFree(d_poly); } void SimPolygon::passData() { size_t siz = poly.size() * 3 * sizeof(float); h_poly = (float*)malloc(siz); for (unsigned int i = 0; i < poly.size(); ++i) { h_poly[i * 3] = poly[i].x; h_poly[i * 3 + 1] = poly[i].y; h_poly[i * 3 + 2] = poly[i].z; } hipError_t err = hipFree(d_poly); if (err != 0) printf("(1) CUDA PASS ERR: %d\n", err); err = hipMalloc(&d_poly, siz); if (err != 0) printf("(2) CUDA PASS ERR: %d\n", err); err = hipMemcpy(d_poly, h_poly, siz, hipMemcpyHostToDevice); if (err != 0) printf("(3) CUDA PASS ERR: %d\n", err); free(h_poly); hipStreamSynchronize(0); } float * SimPolygon::getDeviceDataPtr() { return d_poly; } int SimPolygon::getPointByPosition(float x, float y, float sensitivity) { for (unsigned int i = 0; i < poly.size(); ++i) { if ((x-poly[i].x)*(x - poly[i].x) + (y - poly[i].z)*(y - poly[i].z) < sensitivity*sensitivity) { return i; } } return -1; } void SimPolygon::setPoint(int idx, glm::vec3 pos) { poly[idx] = pos; } void SimPolygon::deletePoint(int idx) { poly.erase(poly.begin()+idx); } void SimPolygon::shiftPoints() { for (unsigned int i = 0; i < poly2add.size(); ++i) { poly.push_back(poly2add[i]); } poly2add.clear(); } void SimPolygon::addPoint(glm::vec3 pos) { poly2add.push_back(pos); } SimPolygon::SimPolygon() { } SimPolygon::~SimPolygon() { }
8b987972293a16c31284ddfd48389027dcb834a8.cu
#include "polygon.cuh" void SimPolygon::resetPoly() { poly.clear(); finished = false; } glm::vec3 SimPolygon::getPoint(int idx) { return poly[idx]; } std::vector<glm::vec3> SimPolygon::getVector() { return poly; } void SimPolygon::deleteData() { cudaFree(d_poly); } void SimPolygon::passData() { size_t siz = poly.size() * 3 * sizeof(float); h_poly = (float*)malloc(siz); for (unsigned int i = 0; i < poly.size(); ++i) { h_poly[i * 3] = poly[i].x; h_poly[i * 3 + 1] = poly[i].y; h_poly[i * 3 + 2] = poly[i].z; } cudaError_t err = cudaFree(d_poly); if (err != 0) printf("(1) CUDA PASS ERR: %d\n", err); err = cudaMalloc(&d_poly, siz); if (err != 0) printf("(2) CUDA PASS ERR: %d\n", err); err = cudaMemcpy(d_poly, h_poly, siz, cudaMemcpyHostToDevice); if (err != 0) printf("(3) CUDA PASS ERR: %d\n", err); free(h_poly); cudaStreamSynchronize(0); } float * SimPolygon::getDeviceDataPtr() { return d_poly; } int SimPolygon::getPointByPosition(float x, float y, float sensitivity) { for (unsigned int i = 0; i < poly.size(); ++i) { if ((x-poly[i].x)*(x - poly[i].x) + (y - poly[i].z)*(y - poly[i].z) < sensitivity*sensitivity) { return i; } } return -1; } void SimPolygon::setPoint(int idx, glm::vec3 pos) { poly[idx] = pos; } void SimPolygon::deletePoint(int idx) { poly.erase(poly.begin()+idx); } void SimPolygon::shiftPoints() { for (unsigned int i = 0; i < poly2add.size(); ++i) { poly.push_back(poly2add[i]); } poly2add.clear(); } void SimPolygon::addPoint(glm::vec3 pos) { poly2add.push_back(pos); } SimPolygon::SimPolygon() { } SimPolygon::~SimPolygon() { }
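SimPolygon::passData in this pair shows a re-upload pattern: free the previous device buffer, allocate a new one, copy the freshly packed host data, and check each call by comparing the returned error code against 0 (both cudaSuccess and hipSuccess are defined as 0). A standalone sketch of that pattern with illustrative names, not the class's own API:

#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

// Replace a previous device buffer with freshly packed host data.
// Passing d_old == nullptr is fine: cudaFree(nullptr) is a no-op.
float* upload_points(float* d_old, const std::vector<float>& h_xyz)
{
    cudaError_t err = cudaFree(d_old);                                   // hipFree in hip_content
    if (err != 0) printf("(1) CUDA PASS ERR: %d\n", err);

    float* d_new = nullptr;
    size_t siz = h_xyz.size() * sizeof(float);
    err = cudaMalloc(&d_new, siz);                                       // hipMalloc
    if (err != 0) printf("(2) CUDA PASS ERR: %d\n", err);

    err = cudaMemcpy(d_new, h_xyz.data(), siz, cudaMemcpyHostToDevice);  // hipMemcpy
    if (err != 0) printf("(3) CUDA PASS ERR: %d\n", err);

    cudaStreamSynchronize(0);                                            // default stream, as in passData
    return d_new;
}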
01289304c0db83d7c6d57ef879300770f118d889.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Equihash CUDA solver // Copyright (c) 2016 John Tromp #include "equi.h" #include <stdio.h> #include <stdlib.h> #include <assert.h> #include "blake2b.cu" typedef uint16_t u16; typedef uint64_t u64; #define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } #ifndef RESTBITS #define RESTBITS 4 #endif // 2_log of number of buckets #define BUCKBITS (DIGITBITS-RESTBITS) // number of buckets static const u32 NBUCKETS = 1<<BUCKBITS; // 2_log of number of slots per bucket static const u32 SLOTBITS = RESTBITS+1+1; // number of slots per bucket static const u32 NSLOTS = 1<<SLOTBITS; // number of per-xhash slots static const u32 XFULL = 16; // SLOTBITS mask static const u32 SLOTMASK = NSLOTS-1; // number of possible values of xhash (rest of n) bits static const u32 NRESTS = 1<<RESTBITS; // number of blocks of hashes extracted from single 512 bit blake2b output static const u32 NBLOCKS = (NHASHES+HASHESPERBLAKE-1)/HASHESPERBLAKE; // nothing larger found in 100000 runs static const u32 MAXSOLS = 8; // scaling factor for showing bucketsize histogra as sparkline #ifndef SPARKSCALE #define SPARKSCALE (40 << (BUCKBITS-12)) #endif // tree node identifying its children as two different slots in // a bucket on previous layer with the same rest bits (x-tra hash) struct tree { unsigned bucketid : BUCKBITS; unsigned slotid0 : SLOTBITS; unsigned slotid1 : SLOTBITS; #ifdef XINTREE unsigned xhash : RESTBITS; #endif // layer 0 has no children bit needs to encode index __device__ u32 getindex() const { return (bucketid << SLOTBITS) | slotid0; } __device__ void setindex(const u32 idx) { slotid0 = idx & SLOTMASK; bucketid = idx >> SLOTBITS; } }; union hashunit { u32 word; uchar bytes[sizeof(u32)]; }; #define WORDS(bits) ((bits + 31) / 32) #define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS) #define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS) struct slot0 { tree attr; hashunit hash[HASHWORDS0]; }; struct slot1 { tree attr; hashunit hash[HASHWORDS1]; }; // a bucket is NSLOTS treenodes typedef slot0 bucket0[NSLOTS]; typedef slot1 bucket1[NSLOTS]; // the N-bit hash consists of K+1 n-bit "digits" // each of which corresponds to a layer of NBUCKETS buckets typedef bucket0 digit0[NBUCKETS]; typedef bucket1 digit1[NBUCKETS]; // size (in bytes) of hash in round 0 <= r < WK u32 hhashsize(const u32 r) { #ifdef XINTREE const u32 hashbits = WN - (r+1) * DIGITBITS; #else const u32 hashbits = WN - (r+1) * DIGITBITS + RESTBITS; #endif return (hashbits + 7) / 8; } // size (in bytes) of hash in round 0 <= r < WK __device__ u32 hashsize(const u32 r) { #ifdef XINTREE const u32 hashbits = WN - (r+1) * DIGITBITS; #else const u32 hashbits = WN - (r+1) * DIGITBITS + RESTBITS; #endif return (hashbits + 7) / 8; } u32 hhashwords(u32 bytes) { return (bytes + 3) / 4; } __device__ u32 hashwords(u32 bytes) { return (bytes + 3) / 4; } // manages hash and tree data struct htalloc { bucket0 *trees0[(WK+1)/2]; bucket1 *trees1[WK/2]; }; typedef u32 bsizes[NBUCKETS]; struct equi { blake2b_state blake_ctx; htalloc hta; bsizes *nslots; proof *sols; u32 nsols; u32 nthreads; equi(const u32 n_threads) { nthreads = n_threads; } void setnonce(const char *header, const u32 headerlen, const u32 nonce) { setheader(&blake_ctx, header, headerlen, 
nonce); checkCudaErrors(hipMemset(nslots, 0, NBUCKETS * sizeof(u32))); nsols = 0; } __device__ u32 getnslots(const u32 r, const u32 bid) { u32 &nslot = nslots[r&1][bid]; const u32 n = min(nslot, NSLOTS); nslot = 0; return n; } __device__ void orderindices(u32 *indices, u32 size) { if (indices[0] > indices[size]) { for (u32 i=0; i < size; i++) { const u32 tmp = indices[i]; indices[i] = indices[size+i]; indices[size+i] = tmp; } } } __device__ void listindices1(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[0][t.bucketid]; const u32 size = 1 << 0; indices[0] = buck[t.slotid0].attr.getindex(); indices[size] = buck[t.slotid1].attr.getindex(); orderindices(indices, size); } __device__ void listindices2(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[0][t.bucketid]; const u32 size = 1 << 1; listindices1(buck[t.slotid0].attr, indices); listindices1(buck[t.slotid1].attr, indices+size); orderindices(indices, size); } __device__ void listindices3(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[1][t.bucketid]; const u32 size = 1 << 2; listindices2(buck[t.slotid0].attr, indices); listindices2(buck[t.slotid1].attr, indices+size); orderindices(indices, size); } __device__ void listindices4(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[1][t.bucketid]; const u32 size = 1 << 3; listindices3(buck[t.slotid0].attr, indices); listindices3(buck[t.slotid1].attr, indices+size); orderindices(indices, size); } __device__ void listindices5(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[2][t.bucketid]; const u32 size = 1 << 4; listindices4(buck[t.slotid0].attr, indices); listindices4(buck[t.slotid1].attr, indices+size); orderindices(indices, size); } __device__ void listindices6(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[2][t.bucketid]; const u32 size = 1 << 5; listindices5(buck[t.slotid0].attr, indices); listindices5(buck[t.slotid1].attr, indices+size); orderindices(indices, size); } __device__ void listindices7(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[3][t.bucketid]; const u32 size = 1 << 6; listindices6(buck[t.slotid0].attr, indices); listindices6(buck[t.slotid1].attr, indices+size); orderindices(indices, size); } __device__ void listindices8(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[3][t.bucketid]; const u32 size = 1 << 7; listindices7(buck[t.slotid0].attr, indices); listindices7(buck[t.slotid1].attr, indices+size); orderindices(indices, size); } __device__ void listindices9(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[4][t.bucketid]; const u32 size = 1 << 8; listindices8(buck[t.slotid0].attr, indices); listindices8(buck[t.slotid1].attr, indices+size); orderindices(indices, size); } __device__ void candidate(const tree t) { proof prf; #if WK==9 listindices9(t, prf); #elif WK==5 listindices5(t, prf); #else #error not implemented #endif if (probdupe(prf)) return; u32 soli = atomicAdd(&nsols, 1); if (soli < MAXSOLS) #if WK==9 listindices9(t, sols[soli]); #elif WK==5 listindices5(t, sols[soli]); #else #error not implemented #endif } void showbsizes(u32 r) { #if defined(HIST) || defined(SPARK) || defined(LOGSPARK) u32 ns[NBUCKETS]; checkCudaErrors(hipMemcpy(ns, nslots[r&1], NBUCKETS * sizeof(u32), hipMemcpyDeviceToHost)); u32 binsizes[65]; memset(binsizes, 0, 65 * sizeof(u32)); for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) { u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS-6); binsizes[bsize]++; } for (u32 i=0; i < 65; i++) { #ifdef HIST printf(" 
%d:%d", i, binsizes[i]); #else #ifdef SPARK u32 sparks = binsizes[i] / SPARKSCALE; #else u32 sparks = 0; for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++; sparks = sparks * 7 / SPARKSCALE; #endif printf("\342\226%c", '\201' + sparks); #endif } printf("\n"); #endif } // proper dupe test is a little costly on GPU, so allow false negatives __device__ bool probdupe(u32 *prf) { unsigned short susp[PROOFSIZE]; memset(susp, 0xffff, PROOFSIZE * sizeof(unsigned short)); for (u32 i=0; i<PROOFSIZE; i++) { u32 bin = prf[i] & (PROOFSIZE-1); unsigned short msb = prf[i]>>WK; if (msb == susp[bin]) return true; susp[bin] = msb; } return false; } struct htlayout { htalloc hta; u32 prevhashunits; u32 nexthashunits; u32 dunits; u32 prevbo; u32 nextbo; __device__ htlayout(equi *eq, u32 r): hta(eq->hta), prevhashunits(0), dunits(0) { u32 nexthashbytes = hashsize(r); nexthashunits = hashwords(nexthashbytes); prevbo = 0; nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3 if (r) { u32 prevhashbytes = hashsize(r-1); prevhashunits = hashwords(prevhashbytes); prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3 dunits = prevhashunits - nexthashunits; } } __device__ u32 getxhash0(const slot0* pslot) const { #ifdef XINTREE return pslot->attr.xhash; #elif WN == 200 && RESTBITS == 4 return pslot->hash->bytes[prevbo] >> 4; #elif WN == 200 && RESTBITS == 8 return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo+1] >> 4; #elif WN == 144 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 6 return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo+1] >> 4; #else #error non implemented #endif } __device__ u32 getxhash1(const slot1* pslot) const { #ifdef XINTREE return pslot->attr.xhash; #elif WN == 200 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 8 return pslot->hash->bytes[prevbo]; #elif WN == 144 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 6 return pslot->hash->bytes[prevbo] &0x3f; #else #error non implemented #endif } __device__ bool equal(const hashunit *hash0, const hashunit *hash1) const { return hash0[prevhashunits-1].word == hash1[prevhashunits-1].word; } }; struct collisiondata { #ifdef XBITMAP #if NSLOTS > 64 #error cant use XBITMAP with more than 64 slots #endif u64 xhashmap[NRESTS]; u64 xmap; #else #if RESTBITS <= 6 typedef uchar xslot; #else typedef u16 xslot; #endif xslot nxhashslots[NRESTS]; xslot xhashslots[NRESTS][XFULL]; xslot *xx; u32 n0; u32 n1; #endif u32 s0; __device__ void clear() { #ifdef XBITMAP memset(xhashmap, 0, NRESTS * sizeof(u64)); #else memset(nxhashslots, 0, NRESTS * sizeof(xslot)); #endif } __device__ bool addslot(u32 s1, u32 xh) { #ifdef XBITMAP xmap = xhashmap[xh]; xhashmap[xh] |= (u64)1 << s1; s0 = ~0; return true; #else n1 = (u32)nxhashslots[xh]++; if (n1 >= XFULL) return false; xx = xhashslots[xh]; xx[n1] = s1; n0 = 0; return true; #endif } __device__ bool nextcollision() const { #ifdef XBITMAP return xmap != 0; #else return n0 < n1; #endif } __device__ u32 slot() { #ifdef XBITMAP const u32 ffs = __ffsll(xmap); s0 += ffs; xmap >>= ffs; return s0; #else return (u32)xx[n0++]; #endif } }; }; __global__ void digitH(equi *eq) { uchar hash[HASHOUT]; blake2b_state state; equi::htlayout htl(eq, 0); const u32 hashbytes = hashsize(0); const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 block = id; block < NBLOCKS; block += eq->nthreads) { state = eq->blake_ctx; blake2b_gpu_hash(&state, block, hash, 
HASHOUT); for (u32 i = 0; i<HASHESPERBLAKE; i++) { const uchar *ph = hash + i * WN/8; #if BUCKBITS == 16 && RESTBITS == 4 const u32 bucketid = ((u32)ph[0] << 8) | ph[1]; #ifdef XINTREE const u32 xhash = ph[2] >> 4; #endif #elif BUCKBITS == 14 && RESTBITS == 6 const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2; #elif BUCKBITS == 12 && RESTBITS == 8 const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4; #elif BUCKBITS == 20 && RESTBITS == 4 const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4; #ifdef XINTREE const u32 xhash = ph[2] & 0xf; #endif #elif BUCKBITS == 12 && RESTBITS == 4 const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4; const u32 xhash = ph[1] & 0xf; #else #error not implemented #endif const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1); if (slot >= NSLOTS) continue; tree leaf; leaf.setindex(block*HASHESPERBLAKE+i); #ifdef XINTREE leaf.xhash = xhash; #endif slot0 &s = eq->hta.trees0[0][bucketid][slot]; s.attr = leaf; memcpy(s.hash->bytes+htl.nextbo, ph+WN/8-hashbytes, hashbytes); } } } __global__ void digitO(equi *eq, const u32 r) { equi::htlayout htl(eq, r); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[(r-1)/2][bucketid]; // optimize by updating previous buck?! u32 bsize = eq->getnslots(r-1, bucketid); // optimize by putting bucketsize with block?! for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; // optimize by updating previous pslot1?! if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; #if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE) xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1])) << 4 | (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; xhash &= 0xf; #elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4 xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 8) | (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 4) | (xhash = bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 4; xhash &= 0xf; #elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4 xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 4) | (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; xhash &= 0xf; #elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6 xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) & 0xf) << 8) | (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 2 | (bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 6; #else #error not implemented #endif const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; #ifdef XINTREE xort.xhash = xhash; #endif slot1 &xs = htl.hta.trees1[r/2][xorbucketid][xorslot]; xs.attr = xort; for (u32 i=htl.dunits; i < htl.prevhashunits; i++) xs.hash[i-htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word; } } } } __global__ void digitE(equi *eq, const u32 r) { equi::htlayout htl(eq, r); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = 
htl.hta.trees1[(r-1)/2][bucketid]; // OPTIMIZE BY UPDATING PREVIOUS u32 bsize = eq->getnslots(r-1, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; // OPTIMIZE BY UPDATING PREVIOUS if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; #if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE) xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]); xhash = (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; #elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4 xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 8) | (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 4) | (bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 4; #elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4 xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 4) | (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; #elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6 xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 6) | (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 2; #else #error not implemented #endif const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; #ifdef XINTREE xort.xhash = xhash; #endif slot0 &xs = htl.hta.trees0[r/2][xorbucketid][xorslot]; xs.attr = xort; for (u32 i=htl.dunits; i < htl.prevhashunits; i++) xs.hash[i-htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word; } } } } #ifdef UNROLL __global__ void digit_1(equi *eq) { equi::htlayout htl(eq, 1); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[0][bucketid]; u32 bsize = eq->getnslots(0, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1])) << 4 | (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; xhash &= 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; xort.xhash = xhash; slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot]; xs.attr = xort; xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word; xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word; } } } } __global__ void digit2(equi *eq) { equi::htlayout htl(eq, 2); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[0][bucketid]; u32 bsize 
= eq->getnslots(1, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]); xhash = (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; xort.xhash = xhash; slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot]; xs.attr = xort; xs.hash[0].word = pslot0->hash[0].word ^ pslot1->hash[0].word; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word; } } } } __global__ void digit3(equi *eq) { equi::htlayout htl(eq, 3); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[1][bucketid]; u32 bsize = eq->getnslots(2, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1])) << 4 | (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; xhash &= 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; xort.xhash = xhash; slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot]; xs.attr = xort; xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word; } } } } __global__ void digit4(equi *eq) { equi::htlayout htl(eq, 4); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[1][bucketid]; u32 bsize = eq->getnslots(3, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]); xhash = (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; 
xort.slotid0 = s0; xort.slotid1 = s1; xort.xhash = xhash; slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot]; xs.attr = xort; xs.hash[0].word = pslot0->hash[0].word ^ pslot1->hash[0].word; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word; } } } } __global__ void digit5(equi *eq) { equi::htlayout htl(eq, 5); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[2][bucketid]; u32 bsize = eq->getnslots(4, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1])) << 4 | (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; xhash &= 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; xort.xhash = xhash; slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot]; xs.attr = xort; xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; } } } } __global__ void digit6(equi *eq) { equi::htlayout htl(eq, 6); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[2][bucketid]; u32 bsize = eq->getnslots(5, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]); xhash = (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; xort.xhash = xhash; slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot]; xs.attr = xort; xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; } } } } __global__ void digit7(equi *eq) { equi::htlayout htl(eq, 7); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[3][bucketid]; u32 bsize = eq->getnslots(6, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 
xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1])) << 4 | (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; xhash &= 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; xort.xhash = xhash; slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot]; xs.attr = xort; xs.hash[0].word = pslot0->hash[0].word ^ pslot1->hash[0].word; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; } } } } __global__ void digit8(equi *eq) { equi::htlayout htl(eq, 8); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[3][bucketid]; u32 bsize = eq->getnslots(7, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; // OPTIMIZE BY UPDATING PREVIOUS if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]); xhash = (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; xort.xhash = xhash; slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot]; xs.attr = xort; xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word; } } } } #endif __global__ void digitK(equi *eq) { equi::collisiondata cd; equi::htlayout htl(eq, WK); const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[(WK-1)/2][bucketid]; u32 bsize = eq->getnslots(WK-1, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) // assume WK odd continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) { tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; eq->candidate(xort); } } } } } #include <unistd.h> int main(int argc, char **argv) { int nthreads = 8192; int nonce = 0; int tpb = 0; int range = 1; bool showsol = false; const char *header = ""; int c; while ((c = getopt (argc, argv, "h:n:r:t:p:s")) != -1) { switch (c) { case 'h': header = optarg; break; case 'n': nonce = atoi(optarg); break; case 't': nthreads = atoi(optarg); break; case 'p': tpb = atoi(optarg); break; case 'r': range = atoi(optarg); break; case 's': showsol = true; break; } } if (!tpb) // if not set, then default threads per block to roughly square root of threads for (tpb = 1; tpb*tpb < nthreads; tpb *= 2) ; printf("Looking for wagner-tree on (\"%s\",%d", header, nonce); if (range > 1) printf("-%d", nonce+range-1); printf(") with %d %d-bits digits and %d threads (%d per block)\n", NDIGITS, DIGITBITS, nthreads, tpb); equi eq(nthreads); u32 *heap0, *heap1; checkCudaErrors(hipMalloc((void**)&heap0, sizeof(digit0))); 
checkCudaErrors(hipMalloc((void**)&heap1, sizeof(digit1))); for (u32 r=0; r < WK; r++) if ((r&1) == 0) eq.hta.trees0[r/2] = (bucket0 *)(heap0 + r/2); else eq.hta.trees1[r/2] = (bucket1 *)(heap1 + r/2); checkCudaErrors(hipMalloc((void**)&eq.nslots, 2 * NBUCKETS * sizeof(u32))); checkCudaErrors(hipMalloc((void**)&eq.sols, MAXSOLS * sizeof(proof))); equi *device_eq; checkCudaErrors(hipMalloc((void**)&device_eq, sizeof(equi))); hipEvent_t start, stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); proof sols[MAXSOLS]; u32 sumnsols = 0; for (int r = 0; r < range; r++) { hipEventRecord(start, NULL); eq.setnonce(header, strlen(header), nonce+r); checkCudaErrors(hipMemcpy(device_eq, &eq, sizeof(equi), hipMemcpyHostToDevice)); printf("Digit 0\n"); hipLaunchKernelGGL(( digitH), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_eq); eq.showbsizes(0); #if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL) printf("Digit %d\n", 1); hipLaunchKernelGGL(( digit_1), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_eq); eq.showbsizes(1); printf("Digit %d\n", 2); hipLaunchKernelGGL(( digit2), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_eq); eq.showbsizes(2); printf("Digit %d\n", 3); hipLaunchKernelGGL(( digit3), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_eq); eq.showbsizes(3); printf("Digit %d\n", 4); hipLaunchKernelGGL(( digit4), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_eq); eq.showbsizes(4); printf("Digit %d\n", 5); hipLaunchKernelGGL(( digit5), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_eq); eq.showbsizes(5); printf("Digit %d\n", 6); hipLaunchKernelGGL(( digit6), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_eq); eq.showbsizes(6); printf("Digit %d\n", 7); hipLaunchKernelGGL(( digit7), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_eq); eq.showbsizes(7); printf("Digit %d\n", 8); hipLaunchKernelGGL(( digit8), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_eq); eq.showbsizes(8); #else for (u32 r=1; r < WK; r++) { printf("Digit %d\n", r); r&1 ? hipLaunchKernelGGL(( digitO), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_eq, r) : hipLaunchKernelGGL(( digitE), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_eq, r); eq.showbsizes(r); } #endif printf("Digit %d\n", WK); hipLaunchKernelGGL(( digitK), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_eq); checkCudaErrors(hipMemcpy(&eq, device_eq, sizeof(equi), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(sols, eq.sols, MAXSOLS * sizeof(proof), hipMemcpyDeviceToHost)); hipEventRecord(stop, NULL); hipEventSynchronize(stop); float duration; hipEventElapsedTime(&duration, start, stop); printf("%d rounds completed in %.3f seconds.\n", WK, duration / 1000.0f); u32 nsols = 0; for (unsigned s = 0; s < eq.nsols; s++) { if (duped(sols[s])) { printf("Duped!\n"); continue; } nsols++; if (showsol) { printf("Solution"); for (int i = 0; i < PROOFSIZE; i++) printf(" %jx", (uintmax_t)sols[s][i]); printf("\n"); } } printf("%d solutions\n", nsols); sumnsols += nsols; } checkCudaErrors(hipFree(eq.nslots)); checkCudaErrors(hipFree(eq.sols)); checkCudaErrors(hipFree(eq.hta.trees0[0])); checkCudaErrors(hipFree(eq.hta.trees1[0])); printf("%d total solutions\n", sumnsols); return 0; }
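A note on the launch geometry used throughout the hipified solver above: every hipLaunchKernelGGL call passes dim3(nthreads/tpb) blocks of dim3(tpb) threads plus a zero dynamic-shared-memory size and the default stream, which the CUDA original below writes as digitH<<<nthreads/tpb, tpb>>>(device_eq). The host-only sketch below is not part of either file; it reproduces main()'s "roughly square root of threads" default for tpb so the resulting shape can be checked. With the default nthreads = 8192 it yields 64 blocks of 128 threads.

#include <cstdio>

int main() {
  int nthreads = 8192;                        // default in the solver's main()
  int tpb = 0;                                // 0 means "pick a default"
  if (!tpb)                                   // same loop as in main() above
    for (tpb = 1; tpb * tpb < nthreads; tpb *= 2) ;
  printf("nthreads=%d -> tpb=%d, grid=%d blocks\n", nthreads, tpb, nthreads / tpb);
  return 0;
}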
01289304c0db83d7c6d57ef879300770f118d889.cu
// Equihash CUDA solver // Copyright (c) 2016 John Tromp #include "equi.h" #include <stdio.h> #include <stdlib.h> #include <assert.h> #include "blake2b.cu" typedef uint16_t u16; typedef uint64_t u64; #define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } #ifndef RESTBITS #define RESTBITS 4 #endif // 2_log of number of buckets #define BUCKBITS (DIGITBITS-RESTBITS) // number of buckets static const u32 NBUCKETS = 1<<BUCKBITS; // 2_log of number of slots per bucket static const u32 SLOTBITS = RESTBITS+1+1; // number of slots per bucket static const u32 NSLOTS = 1<<SLOTBITS; // number of per-xhash slots static const u32 XFULL = 16; // SLOTBITS mask static const u32 SLOTMASK = NSLOTS-1; // number of possible values of xhash (rest of n) bits static const u32 NRESTS = 1<<RESTBITS; // number of blocks of hashes extracted from single 512 bit blake2b output static const u32 NBLOCKS = (NHASHES+HASHESPERBLAKE-1)/HASHESPERBLAKE; // nothing larger found in 100000 runs static const u32 MAXSOLS = 8; // scaling factor for showing bucketsize histogra as sparkline #ifndef SPARKSCALE #define SPARKSCALE (40 << (BUCKBITS-12)) #endif // tree node identifying its children as two different slots in // a bucket on previous layer with the same rest bits (x-tra hash) struct tree { unsigned bucketid : BUCKBITS; unsigned slotid0 : SLOTBITS; unsigned slotid1 : SLOTBITS; #ifdef XINTREE unsigned xhash : RESTBITS; #endif // layer 0 has no children bit needs to encode index __device__ u32 getindex() const { return (bucketid << SLOTBITS) | slotid0; } __device__ void setindex(const u32 idx) { slotid0 = idx & SLOTMASK; bucketid = idx >> SLOTBITS; } }; union hashunit { u32 word; uchar bytes[sizeof(u32)]; }; #define WORDS(bits) ((bits + 31) / 32) #define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS) #define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS) struct slot0 { tree attr; hashunit hash[HASHWORDS0]; }; struct slot1 { tree attr; hashunit hash[HASHWORDS1]; }; // a bucket is NSLOTS treenodes typedef slot0 bucket0[NSLOTS]; typedef slot1 bucket1[NSLOTS]; // the N-bit hash consists of K+1 n-bit "digits" // each of which corresponds to a layer of NBUCKETS buckets typedef bucket0 digit0[NBUCKETS]; typedef bucket1 digit1[NBUCKETS]; // size (in bytes) of hash in round 0 <= r < WK u32 hhashsize(const u32 r) { #ifdef XINTREE const u32 hashbits = WN - (r+1) * DIGITBITS; #else const u32 hashbits = WN - (r+1) * DIGITBITS + RESTBITS; #endif return (hashbits + 7) / 8; } // size (in bytes) of hash in round 0 <= r < WK __device__ u32 hashsize(const u32 r) { #ifdef XINTREE const u32 hashbits = WN - (r+1) * DIGITBITS; #else const u32 hashbits = WN - (r+1) * DIGITBITS + RESTBITS; #endif return (hashbits + 7) / 8; } u32 hhashwords(u32 bytes) { return (bytes + 3) / 4; } __device__ u32 hashwords(u32 bytes) { return (bytes + 3) / 4; } // manages hash and tree data struct htalloc { bucket0 *trees0[(WK+1)/2]; bucket1 *trees1[WK/2]; }; typedef u32 bsizes[NBUCKETS]; struct equi { blake2b_state blake_ctx; htalloc hta; bsizes *nslots; proof *sols; u32 nsols; u32 nthreads; equi(const u32 n_threads) { nthreads = n_threads; } void setnonce(const char *header, const u32 headerlen, const u32 nonce) { setheader(&blake_ctx, header, headerlen, nonce); checkCudaErrors(cudaMemset(nslots, 0, NBUCKETS * sizeof(u32))); nsols = 0; } 
__device__ u32 getnslots(const u32 r, const u32 bid) { u32 &nslot = nslots[r&1][bid]; const u32 n = min(nslot, NSLOTS); nslot = 0; return n; } __device__ void orderindices(u32 *indices, u32 size) { if (indices[0] > indices[size]) { for (u32 i=0; i < size; i++) { const u32 tmp = indices[i]; indices[i] = indices[size+i]; indices[size+i] = tmp; } } } __device__ void listindices1(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[0][t.bucketid]; const u32 size = 1 << 0; indices[0] = buck[t.slotid0].attr.getindex(); indices[size] = buck[t.slotid1].attr.getindex(); orderindices(indices, size); } __device__ void listindices2(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[0][t.bucketid]; const u32 size = 1 << 1; listindices1(buck[t.slotid0].attr, indices); listindices1(buck[t.slotid1].attr, indices+size); orderindices(indices, size); } __device__ void listindices3(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[1][t.bucketid]; const u32 size = 1 << 2; listindices2(buck[t.slotid0].attr, indices); listindices2(buck[t.slotid1].attr, indices+size); orderindices(indices, size); } __device__ void listindices4(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[1][t.bucketid]; const u32 size = 1 << 3; listindices3(buck[t.slotid0].attr, indices); listindices3(buck[t.slotid1].attr, indices+size); orderindices(indices, size); } __device__ void listindices5(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[2][t.bucketid]; const u32 size = 1 << 4; listindices4(buck[t.slotid0].attr, indices); listindices4(buck[t.slotid1].attr, indices+size); orderindices(indices, size); } __device__ void listindices6(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[2][t.bucketid]; const u32 size = 1 << 5; listindices5(buck[t.slotid0].attr, indices); listindices5(buck[t.slotid1].attr, indices+size); orderindices(indices, size); } __device__ void listindices7(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[3][t.bucketid]; const u32 size = 1 << 6; listindices6(buck[t.slotid0].attr, indices); listindices6(buck[t.slotid1].attr, indices+size); orderindices(indices, size); } __device__ void listindices8(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[3][t.bucketid]; const u32 size = 1 << 7; listindices7(buck[t.slotid0].attr, indices); listindices7(buck[t.slotid1].attr, indices+size); orderindices(indices, size); } __device__ void listindices9(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[4][t.bucketid]; const u32 size = 1 << 8; listindices8(buck[t.slotid0].attr, indices); listindices8(buck[t.slotid1].attr, indices+size); orderindices(indices, size); } __device__ void candidate(const tree t) { proof prf; #if WK==9 listindices9(t, prf); #elif WK==5 listindices5(t, prf); #else #error not implemented #endif if (probdupe(prf)) return; u32 soli = atomicAdd(&nsols, 1); if (soli < MAXSOLS) #if WK==9 listindices9(t, sols[soli]); #elif WK==5 listindices5(t, sols[soli]); #else #error not implemented #endif } void showbsizes(u32 r) { #if defined(HIST) || defined(SPARK) || defined(LOGSPARK) u32 ns[NBUCKETS]; checkCudaErrors(cudaMemcpy(ns, nslots[r&1], NBUCKETS * sizeof(u32), cudaMemcpyDeviceToHost)); u32 binsizes[65]; memset(binsizes, 0, 65 * sizeof(u32)); for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) { u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS-6); binsizes[bsize]++; } for (u32 i=0; i < 65; i++) { #ifdef HIST printf(" %d:%d", i, binsizes[i]); #else #ifdef SPARK u32 sparks = binsizes[i] / SPARKSCALE; 
#else u32 sparks = 0; for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++; sparks = sparks * 7 / SPARKSCALE; #endif printf("\342\226%c", '\201' + sparks); #endif } printf("\n"); #endif } // proper dupe test is a little costly on GPU, so allow false negatives __device__ bool probdupe(u32 *prf) { unsigned short susp[PROOFSIZE]; memset(susp, 0xffff, PROOFSIZE * sizeof(unsigned short)); for (u32 i=0; i<PROOFSIZE; i++) { u32 bin = prf[i] & (PROOFSIZE-1); unsigned short msb = prf[i]>>WK; if (msb == susp[bin]) return true; susp[bin] = msb; } return false; } struct htlayout { htalloc hta; u32 prevhashunits; u32 nexthashunits; u32 dunits; u32 prevbo; u32 nextbo; __device__ htlayout(equi *eq, u32 r): hta(eq->hta), prevhashunits(0), dunits(0) { u32 nexthashbytes = hashsize(r); nexthashunits = hashwords(nexthashbytes); prevbo = 0; nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3 if (r) { u32 prevhashbytes = hashsize(r-1); prevhashunits = hashwords(prevhashbytes); prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3 dunits = prevhashunits - nexthashunits; } } __device__ u32 getxhash0(const slot0* pslot) const { #ifdef XINTREE return pslot->attr.xhash; #elif WN == 200 && RESTBITS == 4 return pslot->hash->bytes[prevbo] >> 4; #elif WN == 200 && RESTBITS == 8 return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo+1] >> 4; #elif WN == 144 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 6 return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo+1] >> 4; #else #error non implemented #endif } __device__ u32 getxhash1(const slot1* pslot) const { #ifdef XINTREE return pslot->attr.xhash; #elif WN == 200 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 8 return pslot->hash->bytes[prevbo]; #elif WN == 144 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 6 return pslot->hash->bytes[prevbo] &0x3f; #else #error non implemented #endif } __device__ bool equal(const hashunit *hash0, const hashunit *hash1) const { return hash0[prevhashunits-1].word == hash1[prevhashunits-1].word; } }; struct collisiondata { #ifdef XBITMAP #if NSLOTS > 64 #error cant use XBITMAP with more than 64 slots #endif u64 xhashmap[NRESTS]; u64 xmap; #else #if RESTBITS <= 6 typedef uchar xslot; #else typedef u16 xslot; #endif xslot nxhashslots[NRESTS]; xslot xhashslots[NRESTS][XFULL]; xslot *xx; u32 n0; u32 n1; #endif u32 s0; __device__ void clear() { #ifdef XBITMAP memset(xhashmap, 0, NRESTS * sizeof(u64)); #else memset(nxhashslots, 0, NRESTS * sizeof(xslot)); #endif } __device__ bool addslot(u32 s1, u32 xh) { #ifdef XBITMAP xmap = xhashmap[xh]; xhashmap[xh] |= (u64)1 << s1; s0 = ~0; return true; #else n1 = (u32)nxhashslots[xh]++; if (n1 >= XFULL) return false; xx = xhashslots[xh]; xx[n1] = s1; n0 = 0; return true; #endif } __device__ bool nextcollision() const { #ifdef XBITMAP return xmap != 0; #else return n0 < n1; #endif } __device__ u32 slot() { #ifdef XBITMAP const u32 ffs = __ffsll(xmap); s0 += ffs; xmap >>= ffs; return s0; #else return (u32)xx[n0++]; #endif } }; }; __global__ void digitH(equi *eq) { uchar hash[HASHOUT]; blake2b_state state; equi::htlayout htl(eq, 0); const u32 hashbytes = hashsize(0); const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 block = id; block < NBLOCKS; block += eq->nthreads) { state = eq->blake_ctx; blake2b_gpu_hash(&state, block, hash, HASHOUT); for (u32 i = 0; i<HASHESPERBLAKE; i++) { const uchar *ph = hash + i * 
WN/8; #if BUCKBITS == 16 && RESTBITS == 4 const u32 bucketid = ((u32)ph[0] << 8) | ph[1]; #ifdef XINTREE const u32 xhash = ph[2] >> 4; #endif #elif BUCKBITS == 14 && RESTBITS == 6 const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2; #elif BUCKBITS == 12 && RESTBITS == 8 const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4; #elif BUCKBITS == 20 && RESTBITS == 4 const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4; #ifdef XINTREE const u32 xhash = ph[2] & 0xf; #endif #elif BUCKBITS == 12 && RESTBITS == 4 const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4; const u32 xhash = ph[1] & 0xf; #else #error not implemented #endif const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1); if (slot >= NSLOTS) continue; tree leaf; leaf.setindex(block*HASHESPERBLAKE+i); #ifdef XINTREE leaf.xhash = xhash; #endif slot0 &s = eq->hta.trees0[0][bucketid][slot]; s.attr = leaf; memcpy(s.hash->bytes+htl.nextbo, ph+WN/8-hashbytes, hashbytes); } } } __global__ void digitO(equi *eq, const u32 r) { equi::htlayout htl(eq, r); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[(r-1)/2][bucketid]; // optimize by updating previous buck?! u32 bsize = eq->getnslots(r-1, bucketid); // optimize by putting bucketsize with block?! for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; // optimize by updating previous pslot1?! if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; #if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE) xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1])) << 4 | (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; xhash &= 0xf; #elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4 xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 8) | (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 4) | (xhash = bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 4; xhash &= 0xf; #elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4 xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 4) | (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; xhash &= 0xf; #elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6 xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) & 0xf) << 8) | (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 2 | (bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 6; #else #error not implemented #endif const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; #ifdef XINTREE xort.xhash = xhash; #endif slot1 &xs = htl.hta.trees1[r/2][xorbucketid][xorslot]; xs.attr = xort; for (u32 i=htl.dunits; i < htl.prevhashunits; i++) xs.hash[i-htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word; } } } } __global__ void digitE(equi *eq, const u32 r) { equi::htlayout htl(eq, r); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[(r-1)/2][bucketid]; // OPTIMIZE BY UPDATING PREVIOUS u32 bsize = eq->getnslots(r-1, 
bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; // OPTIMIZE BY UPDATING PREVIOUS if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; #if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE) xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]); xhash = (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; #elif WN == 144 && BUCKBITS == 20 && RESTBITS == 4 xorbucketid = ((((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 8) | (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2])) << 4) | (bytes0[htl.prevbo+3] ^ bytes1[htl.prevbo+3]) >> 4; #elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4 xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 4) | (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; #elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6 xorbucketid = ((u32)(bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]) << 6) | (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 2; #else #error not implemented #endif const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; #ifdef XINTREE xort.xhash = xhash; #endif slot0 &xs = htl.hta.trees0[r/2][xorbucketid][xorslot]; xs.attr = xort; for (u32 i=htl.dunits; i < htl.prevhashunits; i++) xs.hash[i-htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word; } } } } #ifdef UNROLL __global__ void digit_1(equi *eq) { equi::htlayout htl(eq, 1); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[0][bucketid]; u32 bsize = eq->getnslots(0, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1])) << 4 | (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; xhash &= 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; xort.xhash = xhash; slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot]; xs.attr = xort; xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word; xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word; } } } } __global__ void digit2(equi *eq) { equi::htlayout htl(eq, 2); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[0][bucketid]; u32 bsize = eq->getnslots(1, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; 
if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]); xhash = (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; xort.xhash = xhash; slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot]; xs.attr = xort; xs.hash[0].word = pslot0->hash[0].word ^ pslot1->hash[0].word; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word; } } } } __global__ void digit3(equi *eq) { equi::htlayout htl(eq, 3); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[1][bucketid]; u32 bsize = eq->getnslots(2, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1])) << 4 | (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; xhash &= 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; xort.xhash = xhash; slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot]; xs.attr = xort; xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word; } } } } __global__ void digit4(equi *eq) { equi::htlayout htl(eq, 4); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[1][bucketid]; u32 bsize = eq->getnslots(3, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]); xhash = (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; xort.xhash = xhash; slot0 &xs = 
htl.hta.trees0[2][xorbucketid][xorslot]; xs.attr = xort; xs.hash[0].word = pslot0->hash[0].word ^ pslot1->hash[0].word; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word; } } } } __global__ void digit5(equi *eq) { equi::htlayout htl(eq, 5); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[2][bucketid]; u32 bsize = eq->getnslots(4, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1])) << 4 | (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; xhash &= 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; xort.xhash = xhash; slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot]; xs.attr = xort; xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; } } } } __global__ void digit6(equi *eq) { equi::htlayout htl(eq, 6); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[2][bucketid]; u32 bsize = eq->getnslots(5, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]); xhash = (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; xort.xhash = xhash; slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot]; xs.attr = xort; xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; } } } } __global__ void digit7(equi *eq) { equi::htlayout htl(eq, 7); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[3][bucketid]; u32 bsize = eq->getnslots(6, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 
= pslot1->hash->bytes; xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1])) << 4 | (xhash = bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; xhash &= 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; xort.xhash = xhash; slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot]; xs.attr = xort; xs.hash[0].word = pslot0->hash[0].word ^ pslot1->hash[0].word; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; } } } } __global__ void digit8(equi *eq) { equi::htlayout htl(eq, 8); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid=id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[3][bucketid]; u32 bsize = eq->getnslots(7, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; // OPTIMIZE BY UPDATING PREVIOUS if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8) | (bytes0[htl.prevbo+1] ^ bytes1[htl.prevbo+1]); xhash = (bytes0[htl.prevbo+2] ^ bytes1[htl.prevbo+2]) >> 4; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; xort.xhash = xhash; slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot]; xs.attr = xort; xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word; } } } } #endif __global__ void digitK(equi *eq) { equi::collisiondata cd; equi::htlayout htl(eq, WK); const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[(WK-1)/2][bucketid]; u32 bsize = eq->getnslots(WK-1, bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) // assume WK odd continue; for (; cd.nextcollision(); ) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) { tree xort; xort.bucketid = bucketid; xort.slotid0 = s0; xort.slotid1 = s1; eq->candidate(xort); } } } } } #include <unistd.h> int main(int argc, char **argv) { int nthreads = 8192; int nonce = 0; int tpb = 0; int range = 1; bool showsol = false; const char *header = ""; int c; while ((c = getopt (argc, argv, "h:n:r:t:p:s")) != -1) { switch (c) { case 'h': header = optarg; break; case 'n': nonce = atoi(optarg); break; case 't': nthreads = atoi(optarg); break; case 'p': tpb = atoi(optarg); break; case 'r': range = atoi(optarg); break; case 's': showsol = true; break; } } if (!tpb) // if not set, then default threads per block to roughly square root of threads for (tpb = 1; tpb*tpb < nthreads; tpb *= 2) ; printf("Looking for wagner-tree on (\"%s\",%d", header, nonce); if (range > 1) printf("-%d", nonce+range-1); printf(") with %d %d-bits digits and %d threads (%d per block)\n", NDIGITS, DIGITBITS, nthreads, tpb); equi eq(nthreads); u32 *heap0, *heap1; checkCudaErrors(cudaMalloc((void**)&heap0, sizeof(digit0))); checkCudaErrors(cudaMalloc((void**)&heap1, sizeof(digit1))); for (u32 r=0; r < WK; r++) 
if ((r&1) == 0) eq.hta.trees0[r/2] = (bucket0 *)(heap0 + r/2); else eq.hta.trees1[r/2] = (bucket1 *)(heap1 + r/2); checkCudaErrors(cudaMalloc((void**)&eq.nslots, 2 * NBUCKETS * sizeof(u32))); checkCudaErrors(cudaMalloc((void**)&eq.sols, MAXSOLS * sizeof(proof))); equi *device_eq; checkCudaErrors(cudaMalloc((void**)&device_eq, sizeof(equi))); cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); proof sols[MAXSOLS]; u32 sumnsols = 0; for (int r = 0; r < range; r++) { cudaEventRecord(start, NULL); eq.setnonce(header, strlen(header), nonce+r); checkCudaErrors(cudaMemcpy(device_eq, &eq, sizeof(equi), cudaMemcpyHostToDevice)); printf("Digit 0\n"); digitH<<<nthreads/tpb,tpb >>>(device_eq); eq.showbsizes(0); #if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL) printf("Digit %d\n", 1); digit_1<<<nthreads/tpb,tpb >>>(device_eq); eq.showbsizes(1); printf("Digit %d\n", 2); digit2<<<nthreads/tpb,tpb >>>(device_eq); eq.showbsizes(2); printf("Digit %d\n", 3); digit3<<<nthreads/tpb,tpb >>>(device_eq); eq.showbsizes(3); printf("Digit %d\n", 4); digit4<<<nthreads/tpb,tpb >>>(device_eq); eq.showbsizes(4); printf("Digit %d\n", 5); digit5<<<nthreads/tpb,tpb >>>(device_eq); eq.showbsizes(5); printf("Digit %d\n", 6); digit6<<<nthreads/tpb,tpb >>>(device_eq); eq.showbsizes(6); printf("Digit %d\n", 7); digit7<<<nthreads/tpb,tpb >>>(device_eq); eq.showbsizes(7); printf("Digit %d\n", 8); digit8<<<nthreads/tpb,tpb >>>(device_eq); eq.showbsizes(8); #else for (u32 r=1; r < WK; r++) { printf("Digit %d\n", r); r&1 ? digitO<<<nthreads/tpb,tpb >>>(device_eq, r) : digitE<<<nthreads/tpb,tpb >>>(device_eq, r); eq.showbsizes(r); } #endif printf("Digit %d\n", WK); digitK<<<nthreads/tpb,tpb >>>(device_eq); checkCudaErrors(cudaMemcpy(&eq, device_eq, sizeof(equi), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(sols, eq.sols, MAXSOLS * sizeof(proof), cudaMemcpyDeviceToHost)); cudaEventRecord(stop, NULL); cudaEventSynchronize(stop); float duration; cudaEventElapsedTime(&duration, start, stop); printf("%d rounds completed in %.3f seconds.\n", WK, duration / 1000.0f); u32 nsols = 0; for (unsigned s = 0; s < eq.nsols; s++) { if (duped(sols[s])) { printf("Duped!\n"); continue; } nsols++; if (showsol) { printf("Solution"); for (int i = 0; i < PROOFSIZE; i++) printf(" %jx", (uintmax_t)sols[s][i]); printf("\n"); } } printf("%d solutions\n", nsols); sumnsols += nsols; } checkCudaErrors(cudaFree(eq.nslots)); checkCudaErrors(cudaFree(eq.sols)); checkCudaErrors(cudaFree(eq.hta.trees0[0])); checkCudaErrors(cudaFree(eq.hta.trees1[0])); printf("%d total solutions\n", sumnsols); return 0; }
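The tree struct above packs a leaf's hash index into its bucketid and slotid0 fields: setindex() stores idx & SLOTMASK and idx >> SLOTBITS, and getindex() reassembles (bucketid << SLOTBITS) | slotid0. With the file's default RESTBITS = 4, SLOTBITS is RESTBITS+1+1 = 6. A small host-only sketch of that round trip, not taken from the source:

#include <cstdio>
#include <cstdint>

int main() {
  const uint32_t SLOTBITS = 6;                        // RESTBITS + 1 + 1 with the default RESTBITS = 4
  const uint32_t SLOTMASK = (1u << SLOTBITS) - 1;
  uint32_t idx = 123456;                              // a leaf index (block*HASHESPERBLAKE + i)
  uint32_t slotid0  = idx & SLOTMASK;                 // as in tree::setindex()
  uint32_t bucketid = idx >> SLOTBITS;
  uint32_t back = (bucketid << SLOTBITS) | slotid0;   // as in tree::getindex()
  printf("idx=%u -> bucket=%u slot=%u -> %u\n", idx, bucketid, slotid0, back);
  return 0;
}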
2840c0a116683f557413d81f210d297091dc451e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright (c) 2016-2018, Brian Kennedy. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ /****************************************************************************** * * See https://github.com/Simantex/CSVImporter for repository and documentation. * ******************************************************************************/ #include "CSV_kernel_declarations.cuh" #include "csvImporter.h" extern "C" void launch_RecordsColumnsChars_StreamCompact(uint32_t * d_MatchesRecs, uint32_t * d_MatchesCols, uint32_t * d_MatchesChars, uint32_t * d_ScanRecs, uint32_t * d_ScanCols, uint32_t * d_ScanChars, uint32_t * d_OrdinalsRecs, uint32_t * d_OrdinalsCols, uint32_t * d_OrdinalsChars, uint32_t * d_OrdinalsRecsToCols, uint32_t * d_OrdinalsColsToChars, uint32_t ValuesCount) { // Call stream compact kernel. int iThreads = 256; float fBlocks = (float)ValuesCount / ((float)iThreads); int iBlocks = ValuesCount / iThreads; fBlocks = fBlocks - iBlocks; if (fBlocks > 0) iBlocks++; hipLaunchKernelGGL(( RecordsColumnsChars_StreamCompact) , dim3(iBlocks), dim3(iThreads) , 0, 0, d_MatchesRecs, d_MatchesCols, d_MatchesChars, d_ScanRecs, d_ScanCols, d_ScanChars, d_OrdinalsRecs, d_OrdinalsCols, d_OrdinalsChars, d_OrdinalsRecsToCols, d_OrdinalsColsToChars, ValuesCount); Check_cuda_Errors("RecordsColumnsChars_StreamCompact"); } // specialized stream compact version that checks for match headers for both records and columns, and builds records, columns, and recordstocolumns tables. // For each match header the current index of the match header is copied to the ordinals array at the index in the ordinals array which equals the scan value at the same position as the header. // this assumes the ordinals arrays have been properly sized coming in. // records to columns would normally be a simple multiple, e.g., if there are 10 columns per record, the array index multiple would be 10. // as long as that is true, only the columns table would be needed. // however, we assume there might be column count errors, in which case the different tables provide precise access between records and columns // and allow us to check for column count errors. // the new version adds processing utf8 chars. // it processes char scans and outputs the char table and the columns to chars table. 
__global__ void RecordsColumnsChars_StreamCompact(uint32_t * d_MatchesRecs, uint32_t * d_MatchesCols, uint32_t * d_MatchesChars, uint32_t * d_ScanRecs, uint32_t * d_ScanCols, uint32_t * d_ScanChars, uint32_t * d_OrdinalsRecs, uint32_t * d_OrdinalsCols, uint32_t * d_OrdinalsChars, uint32_t * d_OrdinalsRecsToCols, uint32_t * d_OrdinalsColsToChars, uint32_t ValuesCount) { int ix = blockIdx.x * blockDim.x + threadIdx.x; if (ix >= ValuesCount) return; // ignore anything in last block beyond source arrays length. // the index into the result arrays is simply the Exclusive Scan value at the current position. if (d_MatchesRecs[ix] == 1) { d_OrdinalsRecs[d_ScanRecs[ix]] = ix; // the recstocols puts in the same relative position as the recs table the SCAN value of the cols, for looking up into the cols table. d_OrdinalsRecsToCols[d_ScanRecs[ix]] = d_ScanCols[ix]; } if (d_MatchesCols[ix] == 1) { d_OrdinalsCols[d_ScanCols[ix]] = ix; // the recstocols puts in the same relative position as the recs table the SCAN value of the cols, for looking up into the cols table. d_OrdinalsColsToChars[d_ScanCols[ix]] = d_ScanChars[ix]; } if (d_MatchesChars[ix] == 1) { d_OrdinalsChars[d_ScanChars[ix]] = ix; } return; }
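The launcher above rounds ValuesCount/iThreads up to a whole number of blocks by comparing the float quotient against the truncated integer quotient. A host-only sketch, not part of the importer, showing that for typical sizes this matches the usual integer ceiling division (ValuesCount + iThreads - 1) / iThreads:

#include <cstdio>
#include <cstdint>

// Same rounding as launch_RecordsColumnsChars_StreamCompact above.
static int blocksFloat(uint32_t ValuesCount, int iThreads) {
  float fBlocks = (float)ValuesCount / ((float)iThreads);
  int iBlocks = ValuesCount / iThreads;
  fBlocks = fBlocks - iBlocks;
  if (fBlocks > 0) iBlocks++;
  return iBlocks;
}

// The common integer ceiling-division idiom.
static int blocksCeil(uint32_t ValuesCount, int iThreads) {
  return (ValuesCount + iThreads - 1) / iThreads;
}

int main() {
  uint32_t sizes[] = {1, 255, 256, 257, 100000};
  for (uint32_t s : sizes)
    printf("%u values -> %d vs %d blocks\n", s, blocksFloat(s, 256), blocksCeil(s, 256));
  return 0;
}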
2840c0a116683f557413d81f210d297091dc451e.cu
/****************************************************************************** * Copyright (c) 2016-2018, Brian Kennedy. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ******************************************************************************/ /****************************************************************************** * * See https://github.com/Simantex/CSVImporter for repository and documentation. * ******************************************************************************/ #include "CSV_kernel_declarations.cuh" #include "csvImporter.h" extern "C" void launch_RecordsColumnsChars_StreamCompact(uint32_t * d_MatchesRecs, uint32_t * d_MatchesCols, uint32_t * d_MatchesChars, uint32_t * d_ScanRecs, uint32_t * d_ScanCols, uint32_t * d_ScanChars, uint32_t * d_OrdinalsRecs, uint32_t * d_OrdinalsCols, uint32_t * d_OrdinalsChars, uint32_t * d_OrdinalsRecsToCols, uint32_t * d_OrdinalsColsToChars, uint32_t ValuesCount) { // Call stream compact kernel. int iThreads = 256; float fBlocks = (float)ValuesCount / ((float)iThreads); int iBlocks = ValuesCount / iThreads; fBlocks = fBlocks - iBlocks; if (fBlocks > 0) iBlocks++; RecordsColumnsChars_StreamCompact <<<iBlocks, iThreads >>> (d_MatchesRecs, d_MatchesCols, d_MatchesChars, d_ScanRecs, d_ScanCols, d_ScanChars, d_OrdinalsRecs, d_OrdinalsCols, d_OrdinalsChars, d_OrdinalsRecsToCols, d_OrdinalsColsToChars, ValuesCount); Check_cuda_Errors("RecordsColumnsChars_StreamCompact"); } // specialized stream compact version that checks for match headers for both records and columns, and builds records, columns, and recordstocolumns tables. // For each match header the current index of the match header is copied to the ordinals array at the index in the ordinals array which equals the scan value at the same position as the header. // this assumes the ordinals arrays have been properly sized coming in. // records to columns would normally be a simple multiple, e.g., if there are 10 columns per record, the array index multiple would be 10. // as long as that is true, only the columns table would be needed. // however, we assume there might be column count errors, in which case the different tables provide precise access between records and columns // and allow us to check for column count errors. // the new version adds processing utf8 chars. // it processes char scans and outputs the char table and the columns to chars table. __global__ void RecordsColumnsChars_StreamCompact(uint32_t * d_MatchesRecs, uint32_t * d_MatchesCols, uint32_t * d_MatchesChars, uint32_t * d_ScanRecs, uint32_t * d_ScanCols, uint32_t * d_ScanChars, uint32_t * d_OrdinalsRecs, uint32_t * d_OrdinalsCols, uint32_t * d_OrdinalsChars, uint32_t * d_OrdinalsRecsToCols, uint32_t * d_OrdinalsColsToChars, uint32_t ValuesCount) { int ix = blockIdx.x * blockDim.x + threadIdx.x; if (ix >= ValuesCount) return; // ignore anything in last block beyond source arrays length. // the index into the result arrays is simply the Exclusive Scan value at the current position. 
if (d_MatchesRecs[ix] == 1) { d_OrdinalsRecs[d_ScanRecs[ix]] = ix; // the recstocols puts in the same relative position as the recs table the SCAN value of the cols, for looking up into the cols table. d_OrdinalsRecsToCols[d_ScanRecs[ix]] = d_ScanCols[ix]; } if (d_MatchesCols[ix] == 1) { d_OrdinalsCols[d_ScanCols[ix]] = ix; // the recstocols puts in the same relative position as the recs table the SCAN value of the cols, for looking up into the cols table. d_OrdinalsColsToChars[d_ScanCols[ix]] = d_ScanChars[ix]; } if (d_MatchesChars[ix] == 1) { d_OrdinalsChars[d_ScanChars[ix]] = ix; } return; }
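The rule the comments above describe, namely that wherever a match flag is 1 the element's index is written into the ordinals array at the position given by the exclusive scan of the flags, in a stand-alone form. The sketch below is not part of the importer; it uses a hard-coded flags/scan pair just to show the expected compaction output (0 3 5).

#include <cstdio>
#include <cstdint>
#include <cuda_runtime.h>

__global__ void compact(const uint32_t *matches, const uint32_t *scan,
                        uint32_t *ordinals, uint32_t n) {
  uint32_t ix = blockIdx.x * blockDim.x + threadIdx.x;
  if (ix >= n) return;                 // ignore threads beyond the input length
  if (matches[ix] == 1)
    ordinals[scan[ix]] = ix;           // same rule as RecordsColumnsChars_StreamCompact
}

int main() {
  const uint32_t n = 6;
  uint32_t matches[n] = {1, 0, 0, 1, 0, 1};
  uint32_t scan[n]    = {0, 1, 1, 1, 2, 2};   // exclusive prefix sum of matches
  uint32_t *d_m, *d_s, *d_o;
  cudaMalloc(&d_m, n * sizeof(uint32_t));
  cudaMalloc(&d_s, n * sizeof(uint32_t));
  cudaMalloc(&d_o, 3 * sizeof(uint32_t));     // 3 matches in total
  cudaMemcpy(d_m, matches, n * sizeof(uint32_t), cudaMemcpyHostToDevice);
  cudaMemcpy(d_s, scan, n * sizeof(uint32_t), cudaMemcpyHostToDevice);
  compact<<<1, 32>>>(d_m, d_s, d_o, n);
  uint32_t out[3];
  cudaMemcpy(out, d_o, sizeof(out), cudaMemcpyDeviceToHost);
  printf("compacted indices: %u %u %u\n", out[0], out[1], out[2]);  // 0 3 5
  cudaFree(d_m); cudaFree(d_s); cudaFree(d_o);
  return 0;
}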
4e696e423d31ca7c22c80dd8818ac6760ffeed33.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ //final kernel //INSERT KERNEL CODE HERE //__device__ __constant__ float test_image[1024]; __constant__ float conv[800]; __constant__ float bias[8]; __global__ void convolution(float* test_image ,float* out ) { //int Row = blockIdx.y* blockDim.y + threadIdx.y; //int Col = blockIdx.x* blockDim.x + threadIdx.x; int convid = blockIdx.x; int row = threadIdx.y; int col = threadIdx.x; int index = 8*(row*28+col); //loading the matrix into shared memory __shared__ float Matrix1[28][28]; Matrix1[row][col]=test_image[row*28 + col]; __syncthreads(); float value = 0.0f; for (int i=-4 ;i<=5;i++){ for(int j=-4 ;j<=5 ;j++){ int ModRow = row +i; int ModCol = col +j; if(ModRow>=0 && ModCol>=0 && ModRow<28 && ModCol<28){ int temp = (i+4)*10 + j+4; value += Matrix1[ModRow][ModCol]*conv[temp +100*convid]; } } } out[index+convid] = (value+bias[convid])>=0? (value +bias[convid]) :0.0f; } __global__ void Multiply0(float *imageData , float* multiplier , int multiplier_height,float* matrixresult ,float* bias1){ __shared__ float ds_M[128]; int Col = blockIdx.x*blockDim.x+threadIdx.x; double Pvalue = 0.0; for (int m = 0; m < 49; m++) { ds_M[threadIdx.x] = imageData[128*m + threadIdx.x]; __syncthreads(); for (int k = 0; k < 128; k++) Pvalue += ds_M[k] * multiplier[(m*128+k)*512 +Col]; } matrixresult[Col] = (Pvalue +bias1[Col] )>=0?(Pvalue +bias1[Col] ):0; } __global__ void Multiply1(float *imageData , float* multiplier , float* matrixresult , float* bias){ __shared__ float ds_M[512][10]; int col = blockIdx.x; int row = threadIdx.x; ds_M[row][col] = multiplier[row*10+col]* imageData[row]; __syncthreads(); if (threadIdx.x==0){ float value=0.0; for (int i=0 ; i<512 ; i++) value+=ds_M[i][blockIdx.x]; matrixresult[col]= value+ bias[col]; } } __global__ void Mul0(float *imageData , float* multiplier ,float* matrixresult){ int row = 7*threadIdx.x; int col = 2*blockIdx.x; for(int i=0; i<7 ;i++){ if((row+i)<6272){ matrixresult[(row+i)*512+col] = multiplier[(row+i)*512+col]*imageData[col]; matrixresult[(row+i)*512+col+1] = multiplier[(row+i)*512+col+1]*imageData[col+1]; } } }
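/*
 * Illustrative host-side launch for the hipified convolution kernel above; it is not part of
 * the original file and assumes it is compiled together with it so that `convolution`, `conv`
 * and `bias` are in scope. Host pointer names (h_image, h_conv, h_bias, h_out) are
 * placeholders. One block per output feature map (blockIdx.x = convid in [0, 8)) and a
 * 28x28 thread block, matching how the kernel indexes blockIdx and threadIdx.
 */
void launch_convolution_sketch(const float* h_image, const float* h_conv,
                               const float* h_bias, float* h_out)
{
    float *d_image = nullptr, *d_out = nullptr;
    hipMalloc(&d_image, 28 * 28 * sizeof(float));
    hipMalloc(&d_out, 28 * 28 * 8 * sizeof(float));                     // 8 feature maps of 28x28

    hipMemcpy(d_image, h_image, 28 * 28 * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpyToSymbol(HIP_SYMBOL(conv), h_conv, 800 * sizeof(float));   // 8 filters of 10x10 weights
    hipMemcpyToSymbol(HIP_SYMBOL(bias), h_bias, 8 * sizeof(float));

    hipLaunchKernelGGL(convolution, dim3(8), dim3(28, 28), 0, 0, d_image, d_out);
    hipDeviceSynchronize();

    hipMemcpy(h_out, d_out, 28 * 28 * 8 * sizeof(float), hipMemcpyDeviceToHost);
    hipFree(d_image);
    hipFree(d_out);
}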
4e696e423d31ca7c22c80dd8818ac6760ffeed33.cu
/****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ //final kernel //INSERT KERNEL CODE HERE //__device__ __constant__ float test_image[1024]; __constant__ float conv[800]; __constant__ float bias[8]; __global__ void convolution(float* test_image ,float* out ) { //int Row = blockIdx.y* blockDim.y + threadIdx.y; //int Col = blockIdx.x* blockDim.x + threadIdx.x; int convid = blockIdx.x; int row = threadIdx.y; int col = threadIdx.x; int index = 8*(row*28+col); //loading the matrix into shared memory __shared__ float Matrix1[28][28]; Matrix1[row][col]=test_image[row*28 + col]; __syncthreads(); float value = 0.0f; for (int i=-4 ;i<=5;i++){ for(int j=-4 ;j<=5 ;j++){ int ModRow = row +i; int ModCol = col +j; if(ModRow>=0 && ModCol>=0 && ModRow<28 && ModCol<28){ int temp = (i+4)*10 + j+4; value += Matrix1[ModRow][ModCol]*conv[temp +100*convid]; } } } out[index+convid] = (value+bias[convid])>=0? (value +bias[convid]) :0.0f; } __global__ void Multiply0(float *imageData , float* multiplier , int multiplier_height,float* matrixresult ,float* bias1){ __shared__ float ds_M[128]; int Col = blockIdx.x*blockDim.x+threadIdx.x; double Pvalue = 0.0; for (int m = 0; m < 49; m++) { ds_M[threadIdx.x] = imageData[128*m + threadIdx.x]; __syncthreads(); for (int k = 0; k < 128; k++) Pvalue += ds_M[k] * multiplier[(m*128+k)*512 +Col]; } matrixresult[Col] = (Pvalue +bias1[Col] )>=0?(Pvalue +bias1[Col] ):0; } __global__ void Multiply1(float *imageData , float* multiplier , float* matrixresult , float* bias){ __shared__ float ds_M[512][10]; int col = blockIdx.x; int row = threadIdx.x; ds_M[row][col] = multiplier[row*10+col]* imageData[row]; __syncthreads(); if (threadIdx.x==0){ float value=0.0; for (int i=0 ; i<512 ; i++) value+=ds_M[i][blockIdx.x]; matrixresult[col]= value+ bias[col]; } } __global__ void Mul0(float *imageData , float* multiplier ,float* matrixresult){ int row = 7*threadIdx.x; int col = 2*blockIdx.x; for(int i=0; i<7 ;i++){ if((row+i)<6272){ matrixresult[(row+i)*512+col] = multiplier[(row+i)*512+col]*imageData[col]; matrixresult[(row+i)*512+col+1] = multiplier[(row+i)*512+col+1]*imageData[col+1]; } } }
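/*
 * Illustrative CPU reference, not part of the original file, for the tiled Multiply0 kernel
 * above: a 6272-input, 512-output fully connected layer with ReLU (49 tiles of 128 inputs).
 * A reference like this is useful for validating the kernel output; note that the kernel
 * reloads ds_M at the top of each tile iteration with only one __syncthreads() after the
 * load, so it is worth verifying there is no race between one tile's reads and the next
 * tile's writes. Function and parameter names below are placeholders.
 */
#include <algorithm>

void multiply0_reference(const float* imageData,    // 49 * 128 = 6272 inputs
                         const float* multiplier,   // 6272 x 512 weights, row-major
                         const float* bias1,        // 512 biases
                         float* matrixresult)       // 512 outputs
{
    for (int col = 0; col < 512; ++col) {
        double acc = 0.0;
        for (int k = 0; k < 49 * 128; ++k)
            acc += (double)imageData[k] * (double)multiplier[k * 512 + col];
        matrixresult[col] = (float)std::max(0.0, acc + (double)bias1[col]);   // ReLU, as in the kernel
    }
}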
7f1afc76b4a64cb40e508a9e2288d9510a356f6a.hip
// !!! This is a file automatically generated by hipify!!! #include "gpuerrchk.h" #include "data.h" data::data(double dinput, int iinput, int size){ msize = size; gpuErrchk(hipMalloc((void**)&dptr,sizeof(double)*size)); gpuErrchk(hipMalloc((void**)&iptr,sizeof(int)*size)); double *h_dptr = (double*)malloc(sizeof(double)*size); int *h_iptr = (int*)malloc(sizeof(int)*size); for(int j=0;j<size;j++){ h_dptr[j] = dinput * (double)j; h_iptr[j] = iinput * j; } gpuErrchk(hipMemcpy(dptr,h_dptr,sizeof(double)*size,hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(iptr,h_iptr,sizeof(int)*size,hipMemcpyHostToDevice)); free(h_dptr); free(h_iptr); } data::~data(){ } __device__ void data::foo(int input, double& output){ output = iptr[input] + dptr[input]; }
7f1afc76b4a64cb40e508a9e2288d9510a356f6a.cu
#include "gpuerrchk.h" #include "data.h" data::data(double dinput, int iinput, int size){ msize = size; gpuErrchk(cudaMalloc((void**)&dptr,sizeof(double)*size)); gpuErrchk(cudaMalloc((void**)&iptr,sizeof(int)*size)); double *h_dptr = (double*)malloc(sizeof(double)*size); int *h_iptr = (int*)malloc(sizeof(int)*size); for(int j=0;j<size;j++){ h_dptr[j] = dinput * (double)j; h_iptr[j] = iinput * j; } gpuErrchk(cudaMemcpy(dptr,h_dptr,sizeof(double)*size,cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(iptr,h_iptr,sizeof(int)*size,cudaMemcpyHostToDevice)); free(h_dptr); free(h_iptr); } data::~data(){ } __device__ void data::foo(int input, double& output){ output = iptr[input] + dptr[input]; }
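/*
 * The gpuerrchk.h header included by data.cu and its hipified counterpart is not part of this
 * dump. The sketch below shows the conventional pattern such a gpuErrchk macro usually follows
 * (an assumption, not the project's actual header): wrap each runtime call, translate the
 * error code to text, and abort on failure.
 */
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

inline void gpuAssert(cudaError_t code, const char* file, int line, bool abortOnError = true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abortOnError) exit((int)code);
    }
}

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }

// Usage, as in the constructor above:
//   gpuErrchk(cudaMalloc((void**)&dptr, sizeof(double) * size));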
efd905652c3d12a530826fdfc8a121a9e8af72f1.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <vector> #include <iostream> #include <string> #include <nvmatrix.cuh> #include <nvmatrix_operators.cuh> #include <matrix.h> #include <convnet.cuh> #include <util.cuh> using namespace std; /* * ======================= * ConvNet * ======================= */ ConvNet::ConvNet(PyListObject* layerParams, int minibatchSize, int deviceID) : Thread(false), _deviceID(deviceID), _data(NULL) { try { int numLayers = PyList_GET_SIZE(layerParams); for (int i = 0; i < numLayers; i++) { PyObject* paramsDict = PyList_GET_ITEM(layerParams, i); string layerType = pyDictGetString(paramsDict, "type"); Layer* l = initLayer(layerType, paramsDict); // Connect backward links in graph for this layer intv* inputLayers = pyDictGetIntV(paramsDict, "inputs"); if (inputLayers != NULL) { for (int i = 0; i < inputLayers->size(); i++) { l->addPrev(&getLayer(inputLayers->at(i))); } } delete inputLayers; } // Connect the forward links in the graph for (int i = 0; i < _layers.size(); i++) { vector<Layer*>& prev = _layers[i]->getPrev(); for (int j = 0; j < prev.size(); j++) { prev[j]->addNext(_layers[i]); } } // Execute post-initialization stuff for (int i = 0; i < _layers.size(); i++) { _layers[i]->postInit(); } _dp = new DataProvider(minibatchSize); } catch (string& s) { cout << "Error creating ConvNet: " << s << endl; exit(1); } } /* * Override this in derived classes */ Layer* ConvNet::initLayer(string& layerType, PyObject* paramsDict) { if (layerType == "fc") { _layers.push_back(new FCLayer(this, paramsDict)); } else if (layerType == "conv") { _layers.push_back(new ConvLayer(this, paramsDict)); } else if (layerType == "local") { _layers.push_back(new LocalUnsharedLayer(this, paramsDict)); } else if (layerType == "pool") { _layers.push_back(&PoolLayer::makePoolLayer(this, paramsDict)); } else if (layerType == "rnorm") { _layers.push_back(new ResponseNormLayer(this, paramsDict)); } else if (layerType == "cmrnorm") { _layers.push_back(new CrossMapResponseNormLayer(this, paramsDict)); } else if (layerType == "cnorm") { _layers.push_back(new ContrastNormLayer(this, paramsDict)); } else if (layerType == "softmax") { 
_layers.push_back(new SoftmaxLayer(this, paramsDict)); } else if (layerType == "eltsum") { _layers.push_back(new EltwiseSumLayer(this, paramsDict)); } else if (layerType == "eltmul") { _layers.push_back(new EltwiseMulLayer(this, paramsDict)); } else if (layerType == "eltmax") { _layers.push_back(new EltwiseMaxLayer(this, paramsDict)); } else if (layerType == "forward") { _layers.push_back(new ForwardLayer(this, paramsDict)); } else if (layerType == "slice") { _layers.push_back(new SliceLayer(this, paramsDict)); } else if (layerType == "concat") { _layers.push_back(new ConcatenationLayer(this, paramsDict)); } else if (layerType == "neuron") { _layers.push_back(new NeuronLayer(this, paramsDict)); } else if (layerType == "nailbed") { _layers.push_back(new NailbedLayer(this, paramsDict)); } else if (layerType == "blur") { _layers.push_back(new GaussianBlurLayer(this, paramsDict)); } else if (layerType == "resize") { _layers.push_back(new ResizeLayer(this, paramsDict)); } else if (layerType == "rgb2yuv") { _layers.push_back(new RGBToYUVLayer(this, paramsDict)); } else if (layerType == "rgb2lab") { _layers.push_back(new RGBToLABLayer(this, paramsDict)); } else if (layerType == "data") { DataLayer *d = new DataLayer(this, paramsDict); _layers.push_back(d); _dataLayers.push_back(d); } else if (strncmp(layerType.c_str(), "cost.", 5) == 0) { CostLayer *c = &CostLayer::makeCostLayer(this, layerType, paramsDict); _layers.push_back(c); _costs.push_back(c); } else { throw string("Unknown layer type ") + layerType; } return _layers.back(); } /* * This executes in a new CPU thread so it's OK to initialize CUDA stuff here. */ void ConvNet::initCuda() { hipSetDevice(_deviceID < 0 ? gpuGetMaxGflopsDeviceId() : _deviceID); hipDeviceSetCacheConfig(hipFuncCachePreferShared); hipblasInit(); NVMatrix::initRandom(time(0)); copyToGPU(); } void* ConvNet::run() { initCuda(); while (true) { Worker* worker = _workerQueue.dequeue(); worker->run(); delete worker; } return NULL; } Queue<Worker*>& ConvNet::getWorkerQueue() { return _workerQueue; } Queue<WorkResult*>& ConvNet::getResultQueue() { return _resultQueue; } DataProvider& ConvNet::getDataProvider() { return *_dp; } Layer& ConvNet::operator[](int idx) { return *_layers[idx]; } Layer& ConvNet::getLayer(int idx) { return *_layers[idx]; } void ConvNet::copyToCPU() { for (int i = 0; i < _layers.size(); i++) { _layers[i]->copyToCPU(); } } void ConvNet::copyToGPU() { for (int i = 0; i < _layers.size(); i++) { _layers[i]->copyToGPU(); } } void ConvNet::updateWeights() { for (int i = 0; i < _layers.size(); i++) { _layers[i]->updateWeights(); } } void ConvNet::reset() { for (int i = 0; i < _layers.size(); i++) { _layers[i]->reset(); } } int ConvNet::getNumLayers() { return _layers.size(); } void ConvNet::bprop(PASS_TYPE passType) { for (int i = 0; i < _costs.size(); i++) { _costs[i]->bprop(passType); } reset(); } void ConvNet::fprop(PASS_TYPE passType) { assert(_data != NULL); reset(); for (int i = 0; i < _dataLayers.size(); i++) { _dataLayers[i]->fprop(_data->getData(), passType); } } void ConvNet::fprop(GPUData& data, PASS_TYPE passType) { if (&data != _data) { delete _data; } _data = &data; fprop(passType); } void ConvNet::fprop(int miniIdx, PASS_TYPE passType) { delete _data; _data = &_dp->getMinibatch(miniIdx); fprop(passType); } Cost& ConvNet::getCost() { return *new Cost(_data->getNumCases(), _costs); } // Same as getCost() but adds results to given cost and returns it Cost& ConvNet::getCost(Cost& cost) { Cost& newCost = getCost(); cost += newCost; delete &newCost; 
return cost; } double ConvNet::getCostValue() { Cost& cost = getCost(); double val = cost.getValue(); delete &cost; return val; } /* * Gradient checking stuff */ void ConvNet::checkGradients() { _numFailures = 0; _numTests = 0; fprop(0, PASS_GC); _baseErr = getCostValue(); bprop(PASS_GC); for (vector<Layer*>::iterator it = _layers.begin(); it != _layers.end(); ++it) { (*it)->checkGradients(); } cout << "------------------------" << endl; if (_numFailures > 0) { cout << _numFailures << "/" << _numTests << " TESTS FAILED" << endl; } else { cout << "ALL " << _numTests << " TESTS PASSED" << endl; } } /* * name: weight matrix name * eps: finite difference step */ bool ConvNet::checkGradient(const string& name, float eps, Weights& weights) { Matrix numGrad(weights.getNumRows(), weights.getNumCols()); Matrix diff(numGrad); numGrad.apply(Matrix::ZERO); Matrix weightsCPU; weights.getW().copyToHost(weightsCPU, true); for(int i = 0; i < weights.getNumRows(); i++) { for (int j = 0; j < weights.getNumCols(); j++) { float v = weightsCPU(i,j); weightsCPU(i,j) += eps; weights.getW().copyFromHost(weightsCPU); weightsCPU(i,j) = v; fprop(PASS_GC); double err = getCostValue(); numGrad(i,j) = (err - _baseErr) / (_data->getNumCases() * eps); if (isnan(numGrad(i,j)) || isinf(numGrad(i,j))) { cout << "Numerical computation produced nan or inf when checking '" << name << "': " << numGrad(i,j) << endl; cout << "Consider reducing the sizes of the weights or finite difference steps." << endl; cout << "Exiting." << endl; exit(1); } weights.getW().copyFromHost(weightsCPU); } } Matrix gradCPU; weights.getGrad().copyToHost(gradCPU, true); gradCPU.scale(-1.0 / _data->getNumCases()); float analNorm = gradCPU.norm(); float numNorm = numGrad.norm(); numGrad.subtract(gradCPU, diff); float relErr = diff.norm() / analNorm; bool fail = relErr >= GC_REL_ERR_THRESH; if (fail || !GC_SUPPRESS_PASSES) { cout << "========================" << endl; printf("(%s) %s GRADIENT CHECK\n", fail ? "****FAIL****" : "PASS", name.c_str()); cout << "========================" << endl; cout << "Analytic:" << endl; gradCPU.print(6,4); cout << "Numeric:" << endl; numGrad.print(6,4); printf("Analytic norm: %e\n", analNorm); printf("Numeric norm: %e\n", numNorm); printf("Relative error: %e\n", relErr); } _numTests++; _numFailures += fail; return fail; }
efd905652c3d12a530826fdfc8a121a9e8af72f1.cu
/* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <vector> #include <iostream> #include <string> #include <nvmatrix.cuh> #include <nvmatrix_operators.cuh> #include <matrix.h> #include <convnet.cuh> #include <util.cuh> using namespace std; /* * ======================= * ConvNet * ======================= */ ConvNet::ConvNet(PyListObject* layerParams, int minibatchSize, int deviceID) : Thread(false), _deviceID(deviceID), _data(NULL) { try { int numLayers = PyList_GET_SIZE(layerParams); for (int i = 0; i < numLayers; i++) { PyObject* paramsDict = PyList_GET_ITEM(layerParams, i); string layerType = pyDictGetString(paramsDict, "type"); Layer* l = initLayer(layerType, paramsDict); // Connect backward links in graph for this layer intv* inputLayers = pyDictGetIntV(paramsDict, "inputs"); if (inputLayers != NULL) { for (int i = 0; i < inputLayers->size(); i++) { l->addPrev(&getLayer(inputLayers->at(i))); } } delete inputLayers; } // Connect the forward links in the graph for (int i = 0; i < _layers.size(); i++) { vector<Layer*>& prev = _layers[i]->getPrev(); for (int j = 0; j < prev.size(); j++) { prev[j]->addNext(_layers[i]); } } // Execute post-initialization stuff for (int i = 0; i < _layers.size(); i++) { _layers[i]->postInit(); } _dp = new DataProvider(minibatchSize); } catch (string& s) { cout << "Error creating ConvNet: " << s << endl; exit(1); } } /* * Override this in derived classes */ Layer* ConvNet::initLayer(string& layerType, PyObject* paramsDict) { if (layerType == "fc") { _layers.push_back(new FCLayer(this, paramsDict)); } else if (layerType == "conv") { _layers.push_back(new ConvLayer(this, paramsDict)); } else if (layerType == "local") { _layers.push_back(new LocalUnsharedLayer(this, paramsDict)); } else if (layerType == "pool") { _layers.push_back(&PoolLayer::makePoolLayer(this, paramsDict)); } else if (layerType == "rnorm") { _layers.push_back(new ResponseNormLayer(this, paramsDict)); } else if (layerType == "cmrnorm") { _layers.push_back(new CrossMapResponseNormLayer(this, paramsDict)); } else if (layerType == "cnorm") { _layers.push_back(new ContrastNormLayer(this, paramsDict)); } else if (layerType == "softmax") { _layers.push_back(new SoftmaxLayer(this, paramsDict)); } else if 
(layerType == "eltsum") { _layers.push_back(new EltwiseSumLayer(this, paramsDict)); } else if (layerType == "eltmul") { _layers.push_back(new EltwiseMulLayer(this, paramsDict)); } else if (layerType == "eltmax") { _layers.push_back(new EltwiseMaxLayer(this, paramsDict)); } else if (layerType == "forward") { _layers.push_back(new ForwardLayer(this, paramsDict)); } else if (layerType == "slice") { _layers.push_back(new SliceLayer(this, paramsDict)); } else if (layerType == "concat") { _layers.push_back(new ConcatenationLayer(this, paramsDict)); } else if (layerType == "neuron") { _layers.push_back(new NeuronLayer(this, paramsDict)); } else if (layerType == "nailbed") { _layers.push_back(new NailbedLayer(this, paramsDict)); } else if (layerType == "blur") { _layers.push_back(new GaussianBlurLayer(this, paramsDict)); } else if (layerType == "resize") { _layers.push_back(new ResizeLayer(this, paramsDict)); } else if (layerType == "rgb2yuv") { _layers.push_back(new RGBToYUVLayer(this, paramsDict)); } else if (layerType == "rgb2lab") { _layers.push_back(new RGBToLABLayer(this, paramsDict)); } else if (layerType == "data") { DataLayer *d = new DataLayer(this, paramsDict); _layers.push_back(d); _dataLayers.push_back(d); } else if (strncmp(layerType.c_str(), "cost.", 5) == 0) { CostLayer *c = &CostLayer::makeCostLayer(this, layerType, paramsDict); _layers.push_back(c); _costs.push_back(c); } else { throw string("Unknown layer type ") + layerType; } return _layers.back(); } /* * This executes in a new CPU thread so it's OK to initialize CUDA stuff here. */ void ConvNet::initCuda() { cudaSetDevice(_deviceID < 0 ? gpuGetMaxGflopsDeviceId() : _deviceID); cudaDeviceSetCacheConfig(cudaFuncCachePreferShared); cublasInit(); NVMatrix::initRandom(time(0)); copyToGPU(); } void* ConvNet::run() { initCuda(); while (true) { Worker* worker = _workerQueue.dequeue(); worker->run(); delete worker; } return NULL; } Queue<Worker*>& ConvNet::getWorkerQueue() { return _workerQueue; } Queue<WorkResult*>& ConvNet::getResultQueue() { return _resultQueue; } DataProvider& ConvNet::getDataProvider() { return *_dp; } Layer& ConvNet::operator[](int idx) { return *_layers[idx]; } Layer& ConvNet::getLayer(int idx) { return *_layers[idx]; } void ConvNet::copyToCPU() { for (int i = 0; i < _layers.size(); i++) { _layers[i]->copyToCPU(); } } void ConvNet::copyToGPU() { for (int i = 0; i < _layers.size(); i++) { _layers[i]->copyToGPU(); } } void ConvNet::updateWeights() { for (int i = 0; i < _layers.size(); i++) { _layers[i]->updateWeights(); } } void ConvNet::reset() { for (int i = 0; i < _layers.size(); i++) { _layers[i]->reset(); } } int ConvNet::getNumLayers() { return _layers.size(); } void ConvNet::bprop(PASS_TYPE passType) { for (int i = 0; i < _costs.size(); i++) { _costs[i]->bprop(passType); } reset(); } void ConvNet::fprop(PASS_TYPE passType) { assert(_data != NULL); reset(); for (int i = 0; i < _dataLayers.size(); i++) { _dataLayers[i]->fprop(_data->getData(), passType); } } void ConvNet::fprop(GPUData& data, PASS_TYPE passType) { if (&data != _data) { delete _data; } _data = &data; fprop(passType); } void ConvNet::fprop(int miniIdx, PASS_TYPE passType) { delete _data; _data = &_dp->getMinibatch(miniIdx); fprop(passType); } Cost& ConvNet::getCost() { return *new Cost(_data->getNumCases(), _costs); } // Same as getCost() but adds results to given cost and returns it Cost& ConvNet::getCost(Cost& cost) { Cost& newCost = getCost(); cost += newCost; delete &newCost; return cost; } double ConvNet::getCostValue() { Cost& cost = 
getCost(); double val = cost.getValue(); delete &cost; return val; } /* * Gradient checking stuff */ void ConvNet::checkGradients() { _numFailures = 0; _numTests = 0; fprop(0, PASS_GC); _baseErr = getCostValue(); bprop(PASS_GC); for (vector<Layer*>::iterator it = _layers.begin(); it != _layers.end(); ++it) { (*it)->checkGradients(); } cout << "------------------------" << endl; if (_numFailures > 0) { cout << _numFailures << "/" << _numTests << " TESTS FAILED" << endl; } else { cout << "ALL " << _numTests << " TESTS PASSED" << endl; } } /* * name: weight matrix name * eps: finite difference step */ bool ConvNet::checkGradient(const string& name, float eps, Weights& weights) { Matrix numGrad(weights.getNumRows(), weights.getNumCols()); Matrix diff(numGrad); numGrad.apply(Matrix::ZERO); Matrix weightsCPU; weights.getW().copyToHost(weightsCPU, true); for(int i = 0; i < weights.getNumRows(); i++) { for (int j = 0; j < weights.getNumCols(); j++) { float v = weightsCPU(i,j); weightsCPU(i,j) += eps; weights.getW().copyFromHost(weightsCPU); weightsCPU(i,j) = v; fprop(PASS_GC); double err = getCostValue(); numGrad(i,j) = (err - _baseErr) / (_data->getNumCases() * eps); if (isnan(numGrad(i,j)) || isinf(numGrad(i,j))) { cout << "Numerical computation produced nan or inf when checking '" << name << "': " << numGrad(i,j) << endl; cout << "Consider reducing the sizes of the weights or finite difference steps." << endl; cout << "Exiting." << endl; exit(1); } weights.getW().copyFromHost(weightsCPU); } } Matrix gradCPU; weights.getGrad().copyToHost(gradCPU, true); gradCPU.scale(-1.0 / _data->getNumCases()); float analNorm = gradCPU.norm(); float numNorm = numGrad.norm(); numGrad.subtract(gradCPU, diff); float relErr = diff.norm() / analNorm; bool fail = relErr >= GC_REL_ERR_THRESH; if (fail || !GC_SUPPRESS_PASSES) { cout << "========================" << endl; printf("(%s) %s GRADIENT CHECK\n", fail ? "****FAIL****" : "PASS", name.c_str()); cout << "========================" << endl; cout << "Analytic:" << endl; gradCPU.print(6,4); cout << "Numeric:" << endl; numGrad.print(6,4); printf("Analytic norm: %e\n", analNorm); printf("Numeric norm: %e\n", numNorm); printf("Relative error: %e\n", relErr); } _numTests++; _numFailures += fail; return fail; }
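/*
 * Illustrative sketch (not from cuda-convnet) of the forward-difference gradient check used in
 * ConvNet::checkGradient above: perturb one parameter by eps, re-evaluate the cost, and compare
 * (cost(w + eps) - cost(w)) / eps against the analytic derivative. Here the "network" is just a
 * one-parameter quadratic cost so the example stays self-contained.
 */
#include <cstdio>
#include <cmath>

int main()
{
    const double w = 1.7, eps = 1e-4;
    auto cost = [](double x) { return 0.5 * x * x; };     // analytic gradient: x

    double baseErr  = cost(w);
    double numGrad  = (cost(w + eps) - baseErr) / eps;    // forward difference, as in checkGradient
    double analGrad = w;
    double relErr   = std::fabs(numGrad - analGrad) / std::fabs(analGrad);

    printf("numeric=%f analytic=%f relative error=%e\n", numGrad, analGrad, relErr);
    return relErr < 1e-3 ? 0 : 1;                         // mirrors the GC_REL_ERR_THRESH test
}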
e2b0bb3e809b6f415e7178dee65228490c401815.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/top_k_kernel.h" #include "paddle/fluid/operators/top_k_function_cuda.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/tensor_utils.h" #include "paddle/phi/kernels/funcs/gather.cu.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { namespace ops = paddle::operators; #define FIXED_BLOCK_DIM_BASE(dim, ...) \ case (dim): { \ constexpr auto kBlockDim = (dim); \ __VA_ARGS__; \ } break #define FIXED_BLOCK_DIM(...) \ FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__) template <typename T, typename Context> void TopkKernel(const Context& dev_ctx, const DenseTensor& x, const Scalar& k_scalar, int axis, bool largest, bool sorted, DenseTensor* out, DenseTensor* indices) { const auto* input = &x; // get the input dims const auto& in_dims = input->dims(); // calcluate the real axis if (axis < 0) axis += in_dims.size(); int k = k_scalar.to<int>(); if (k_scalar.FromTensor()) { phi::DDim out_dims = out->dims(); out_dims[axis] = k; out->Resize(out_dims); indices->Resize(out_dims); } const auto& out_dims = out->dims(); const T* input_data = input->data<T>(); T* output_data = dev_ctx.template Alloc<T>(out); int64_t* indices_data = dev_ctx.template Alloc<int64_t>(indices); if (axis == in_dims.size() - 1) { // if get the topK from the last axis const int64_t& input_height = phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1)); const int64_t& input_width = in_dims[in_dims.size() - 1]; if (k > input_width) { k = input_width; } // The conclusion is drawn from the data through multiple sets of // statistics if (input_width >= 128 && k >= input_width * 0.75) { auto* ctx = reinterpret_cast<const phi::GPUContext*>(&dev_ctx); if (ops::SortTopk<T>(*ctx, input, input_width, input_height, k, out, indices, largest)) { // Successed, return. return; } else { VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use " "default topk kernel."; } } #if defined(PADDLE_WITH_CUDA) && TORCH_HIP_VERSION >= 9000 if (input_width >= 1024 && in_dims.size() == 1) { // 1. Gather TopK, but without sorting constexpr int max_num_threads = 1024; if (largest) { hipLaunchKernelGGL(( ops::RadixTopK<T, true>) , dim3(input_height), dim3(max_num_threads), 0, dev_ctx.stream(), input_data, k, input_height, input_width, output_data, indices_data); } else { hipLaunchKernelGGL(( ops::RadixTopK<T, false>) , dim3(input_height), dim3(max_num_threads), 0, dev_ctx.stream(), input_data, k, input_height, input_width, output_data, indices_data); } // 2. 
Sort if needed if (sorted) { DenseTensor sorted_output; DenseTensor sorted_indices; DenseTensor gather_indices; sorted_output.Resize(out->dims()); sorted_indices.Resize(indices->dims()); gather_indices.Resize(indices->dims()); dev_ctx.template Alloc<T>(&sorted_output); dev_ctx.template Alloc<int64_t>(&sorted_indices); dev_ctx.template Alloc<int64_t>(&gather_indices); auto* ctx = reinterpret_cast<const phi::GPUContext*>(&dev_ctx); if (ops::SortTopk<T>(*ctx, out, k, input_height, k, &sorted_output, &sorted_indices, largest)) { funcs::GPUGather<int64_t, int64_t>( dev_ctx, *indices, sorted_indices, &gather_indices); Copy(dev_ctx, gather_indices, indices->place(), false, indices); Copy(dev_ctx, sorted_output, out->place(), false, out); return; } else { VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use " "default topk kernel."; } } else { return; } } #endif // NOTE: pass lds and dim same to input width. // NOTE: old matrix implementation of stride is different to eigen. const int kMaxHeight = 2048; int gridx = input_height < kMaxHeight ? input_height : kMaxHeight; switch (ops::GetDesiredBlockDim(input_width)) { #ifdef PADDLE_WITH_HIP FIXED_BLOCK_DIM( hipLaunchKernelGGL(( ops::KeMatrixTopK<T, 20, kBlockDim>) , dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), output_data, k, indices_data, input_data, input_width, input_width, static_cast<int>(k), gridx, input_height, largest)); #else FIXED_BLOCK_DIM( hipLaunchKernelGGL(( ops::KeMatrixTopK<T, 5, kBlockDim>) , dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), output_data, k, indices_data, input_data, input_width, input_width, static_cast<int>(k), gridx, input_height, largest)); #endif default: PADDLE_THROW(errors::Fatal( "the input data shape has error in the topk cuda kernel.")); } } else { // if get topK not from the last axis, will tranpose the tensor and get // TopK // first step, prepare the trans args for the tranpose std::vector<int> trans; for (int i = 0; i < axis; i++) { trans.emplace_back(i); } trans.emplace_back(in_dims.size() - 1); for (int i = axis + 1; i < in_dims.size() - 1; i++) { trans.emplace_back(i); } trans.emplace_back(axis); phi::DDim trans_dims(in_dims); phi::DDim trans_out_dims(out->dims()); for (int i = 0; i < trans.size(); i++) { trans_dims[i] = in_dims[trans[i]]; trans_out_dims[i] = out_dims[trans[i]]; } // second step, tranpose the input DenseTensor trans_input; trans_input.Resize(trans_dims); dev_ctx.template Alloc<T>(&trans_input); int ndims = trans.size(); funcs::TransCompute<phi::GPUContext, T>( ndims, dev_ctx, *input, &trans_input, trans); // third step, calcluate the topk // allocate the tmp cuda memory for the tmp result DenseTensor trans_ind; DenseTensor trans_out; trans_ind.Resize(trans_out_dims); trans_out.Resize(trans_out_dims); dev_ctx.template Alloc<int64_t>(&trans_ind); dev_ctx.template Alloc<T>(&trans_out); const int64_t input_height = phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1)); const int64_t input_width = trans_dims[trans_dims.size() - 1]; if (k > input_width) k = input_width; // The conclusion is drawn from the data through multiple sets of // statistics if (input_width >= 128 && k >= input_width * 0.75) { auto* ctx = reinterpret_cast<const phi::GPUContext*>(&dev_ctx); if (ops::SortTopk<T>(*ctx, &trans_input, input_width, input_height, k, &trans_out, &trans_ind, largest)) { // last step, tranpose back the indices and output funcs::TransCompute<phi::GPUContext, int64_t>( ndims, dev_ctx, trans_ind, indices, trans); funcs::TransCompute<phi::GPUContext, T>( ndims, 
dev_ctx, trans_out, out, trans); return; } else { VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use " "default topk kernel."; } } const int kMaxHeight = 2048; int gridx = input_height < kMaxHeight ? input_height : kMaxHeight; switch (ops::GetDesiredBlockDim(input_width)) { #ifdef PADDLE_WITH_HIP FIXED_BLOCK_DIM( hipLaunchKernelGGL(( ops::KeMatrixTopK<T, 20, kBlockDim>) , dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), trans_out.data<T>(), k, trans_ind.data<int64_t>(), trans_input.data<T>(), input_width, input_width, static_cast<int>(k), gridx, input_height, largest)); #else FIXED_BLOCK_DIM( hipLaunchKernelGGL(( ops::KeMatrixTopK<T, 5, kBlockDim>) , dim3(gridx), dim3(kBlockDim), 0, dev_ctx.stream(), trans_out.data<T>(), k, trans_ind.data<int64_t>(), trans_input.data<T>(), input_width, input_width, static_cast<int>(k), gridx, input_height, largest)); #endif default: PADDLE_THROW(errors::Fatal( "the input data shape has error in the topk cuda kernel.")); } // last step, tranpose back the indices and output funcs::TransCompute<phi::GPUContext, int64_t>( ndims, dev_ctx, trans_ind, indices, trans); funcs::TransCompute<phi::GPUContext, T>( ndims, dev_ctx, trans_out, out, trans); } } #undef FIXED_BLOCK_DIM_BASE #undef FIXED_BLOCK_DIM } // namespace phi PD_REGISTER_KERNEL(top_k, GPU, ALL_LAYOUT, phi::TopkKernel, float, double, int, int64_t, phi::dtype::float16) {}
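/*
 * Minimal sketch (not from Paddle) of the dispatch pattern behind the FIXED_BLOCK_DIM macros
 * above: the block dimension is a compile-time template argument of the kernel, so a runtime
 * value is mapped onto a fixed set of instantiations with a switch. The hipified file wraps
 * each instantiation in hipLaunchKernelGGL; the triple-chevron form below also compiles under
 * hipcc. Names (FillBlockDim, CASE_BLOCK_DIM, dispatch) are invented for the example.
 */
#include <cstdio>
#include <cuda_runtime.h>

template <int kBlockDim>
__global__ void FillBlockDim(int* out)
{
    if (threadIdx.x == 0) out[blockIdx.x] = kBlockDim;    // stand-in for KeMatrixTopK's real work
}

#define CASE_BLOCK_DIM(dim) \
    case (dim): FillBlockDim<(dim)><<<1, (dim)>>>(d_out); break

void dispatch(int desired, int* d_out)
{
    switch (desired) {
        CASE_BLOCK_DIM(32);
        CASE_BLOCK_DIM(64);
        CASE_BLOCK_DIM(128);
        CASE_BLOCK_DIM(256);
        default: printf("unsupported block dim %d\n", desired);
    }
}

int main()
{
    int* d_out = nullptr;
    cudaMalloc(&d_out, sizeof(int));
    dispatch(128, d_out);
    int h_out = 0;
    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    printf("kernel instantiated with kBlockDim=%d\n", h_out);   // prints 128
    cudaFree(d_out);
    return 0;
}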
e2b0bb3e809b6f415e7178dee65228490c401815.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/top_k_kernel.h" #include "paddle/fluid/operators/top_k_function_cuda.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/tensor_utils.h" #include "paddle/phi/kernels/funcs/gather.cu.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { namespace ops = paddle::operators; #define FIXED_BLOCK_DIM_BASE(dim, ...) \ case (dim): { \ constexpr auto kBlockDim = (dim); \ __VA_ARGS__; \ } break #define FIXED_BLOCK_DIM(...) \ FIXED_BLOCK_DIM_BASE(256, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(128, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(64, ##__VA_ARGS__); \ FIXED_BLOCK_DIM_BASE(32, ##__VA_ARGS__) template <typename T, typename Context> void TopkKernel(const Context& dev_ctx, const DenseTensor& x, const Scalar& k_scalar, int axis, bool largest, bool sorted, DenseTensor* out, DenseTensor* indices) { const auto* input = &x; // get the input dims const auto& in_dims = input->dims(); // calcluate the real axis if (axis < 0) axis += in_dims.size(); int k = k_scalar.to<int>(); if (k_scalar.FromTensor()) { phi::DDim out_dims = out->dims(); out_dims[axis] = k; out->Resize(out_dims); indices->Resize(out_dims); } const auto& out_dims = out->dims(); const T* input_data = input->data<T>(); T* output_data = dev_ctx.template Alloc<T>(out); int64_t* indices_data = dev_ctx.template Alloc<int64_t>(indices); if (axis == in_dims.size() - 1) { // if get the topK from the last axis const int64_t& input_height = phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1)); const int64_t& input_width = in_dims[in_dims.size() - 1]; if (k > input_width) { k = input_width; } // The conclusion is drawn from the data through multiple sets of // statistics if (input_width >= 128 && k >= input_width * 0.75) { auto* ctx = reinterpret_cast<const phi::GPUContext*>(&dev_ctx); if (ops::SortTopk<T>(*ctx, input, input_width, input_height, k, out, indices, largest)) { // Successed, return. return; } else { VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use " "default topk kernel."; } } #if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 9000 if (input_width >= 1024 && in_dims.size() == 1) { // 1. Gather TopK, but without sorting constexpr int max_num_threads = 1024; if (largest) { ops::RadixTopK<T, true> <<<input_height, max_num_threads, 0, dev_ctx.stream()>>>( input_data, k, input_height, input_width, output_data, indices_data); } else { ops::RadixTopK<T, false> <<<input_height, max_num_threads, 0, dev_ctx.stream()>>>( input_data, k, input_height, input_width, output_data, indices_data); } // 2. 
Sort if needed if (sorted) { DenseTensor sorted_output; DenseTensor sorted_indices; DenseTensor gather_indices; sorted_output.Resize(out->dims()); sorted_indices.Resize(indices->dims()); gather_indices.Resize(indices->dims()); dev_ctx.template Alloc<T>(&sorted_output); dev_ctx.template Alloc<int64_t>(&sorted_indices); dev_ctx.template Alloc<int64_t>(&gather_indices); auto* ctx = reinterpret_cast<const phi::GPUContext*>(&dev_ctx); if (ops::SortTopk<T>(*ctx, out, k, input_height, k, &sorted_output, &sorted_indices, largest)) { funcs::GPUGather<int64_t, int64_t>( dev_ctx, *indices, sorted_indices, &gather_indices); Copy(dev_ctx, gather_indices, indices->place(), false, indices); Copy(dev_ctx, sorted_output, out->place(), false, out); return; } else { VLOG(4) << "TopKOP: Some errors happened when use cub sorting, use " "default topk kernel."; } } else { return; } } #endif // NOTE: pass lds and dim same to input width. // NOTE: old matrix implementation of stride is different to eigen. const int kMaxHeight = 2048; int gridx = input_height < kMaxHeight ? input_height : kMaxHeight; switch (ops::GetDesiredBlockDim(input_width)) { #ifdef PADDLE_WITH_HIP FIXED_BLOCK_DIM( ops::KeMatrixTopK<T, 20, kBlockDim> <<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(output_data, k, indices_data, input_data, input_width, input_width, static_cast<int>(k), gridx, input_height, largest)); #else FIXED_BLOCK_DIM( ops::KeMatrixTopK<T, 5, kBlockDim> <<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(output_data, k, indices_data, input_data, input_width, input_width, static_cast<int>(k), gridx, input_height, largest)); #endif default: PADDLE_THROW(errors::Fatal( "the input data shape has error in the topk cuda kernel.")); } } else { // if get topK not from the last axis, will tranpose the tensor and get // TopK // first step, prepare the trans args for the tranpose std::vector<int> trans; for (int i = 0; i < axis; i++) { trans.emplace_back(i); } trans.emplace_back(in_dims.size() - 1); for (int i = axis + 1; i < in_dims.size() - 1; i++) { trans.emplace_back(i); } trans.emplace_back(axis); phi::DDim trans_dims(in_dims); phi::DDim trans_out_dims(out->dims()); for (int i = 0; i < trans.size(); i++) { trans_dims[i] = in_dims[trans[i]]; trans_out_dims[i] = out_dims[trans[i]]; } // second step, tranpose the input DenseTensor trans_input; trans_input.Resize(trans_dims); dev_ctx.template Alloc<T>(&trans_input); int ndims = trans.size(); funcs::TransCompute<phi::GPUContext, T>( ndims, dev_ctx, *input, &trans_input, trans); // third step, calcluate the topk // allocate the tmp cuda memory for the tmp result DenseTensor trans_ind; DenseTensor trans_out; trans_ind.Resize(trans_out_dims); trans_out.Resize(trans_out_dims); dev_ctx.template Alloc<int64_t>(&trans_ind); dev_ctx.template Alloc<T>(&trans_out); const int64_t input_height = phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1)); const int64_t input_width = trans_dims[trans_dims.size() - 1]; if (k > input_width) k = input_width; // The conclusion is drawn from the data through multiple sets of // statistics if (input_width >= 128 && k >= input_width * 0.75) { auto* ctx = reinterpret_cast<const phi::GPUContext*>(&dev_ctx); if (ops::SortTopk<T>(*ctx, &trans_input, input_width, input_height, k, &trans_out, &trans_ind, largest)) { // last step, tranpose back the indices and output funcs::TransCompute<phi::GPUContext, int64_t>( ndims, dev_ctx, trans_ind, indices, trans); funcs::TransCompute<phi::GPUContext, T>( ndims, dev_ctx, trans_out, out, trans); return; } else { VLOG(4) << 
"TopKOP: Some errors happened when use cub sorting, use " "default topk kernel."; } } const int kMaxHeight = 2048; int gridx = input_height < kMaxHeight ? input_height : kMaxHeight; switch (ops::GetDesiredBlockDim(input_width)) { #ifdef PADDLE_WITH_HIP FIXED_BLOCK_DIM( ops::KeMatrixTopK<T, 20, kBlockDim> <<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(trans_out.data<T>(), k, trans_ind.data<int64_t>(), trans_input.data<T>(), input_width, input_width, static_cast<int>(k), gridx, input_height, largest)); #else FIXED_BLOCK_DIM( ops::KeMatrixTopK<T, 5, kBlockDim> <<<gridx, kBlockDim, 0, dev_ctx.stream()>>>(trans_out.data<T>(), k, trans_ind.data<int64_t>(), trans_input.data<T>(), input_width, input_width, static_cast<int>(k), gridx, input_height, largest)); #endif default: PADDLE_THROW(errors::Fatal( "the input data shape has error in the topk cuda kernel.")); } // last step, tranpose back the indices and output funcs::TransCompute<phi::GPUContext, int64_t>( ndims, dev_ctx, trans_ind, indices, trans); funcs::TransCompute<phi::GPUContext, T>( ndims, dev_ctx, trans_out, out, trans); } } #undef FIXED_BLOCK_DIM_BASE #undef FIXED_BLOCK_DIM } // namespace phi PD_REGISTER_KERNEL(top_k, GPU, ALL_LAYOUT, phi::TopkKernel, float, double, int, int64_t, phi::dtype::float16) {}
d638bb41c2791b9517ab012157c25f390d8fa261.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kLessThanScalar.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *mat = NULL; hipMalloc(&mat, XSIZE*YSIZE); float val = 1; float *target = NULL; hipMalloc(&target, XSIZE*YSIZE); unsigned int len = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kLessThanScalar), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,val,target,len); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kLessThanScalar), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,val,target,len); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kLessThanScalar), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,val,target,len); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d638bb41c2791b9517ab012157c25f390d8fa261.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kLessThanScalar.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *mat = NULL; cudaMalloc(&mat, XSIZE*YSIZE); float val = 1; float *target = NULL; cudaMalloc(&target, XSIZE*YSIZE); unsigned int len = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kLessThanScalar<<<gridBlock,threadBlock>>>(mat,val,target,len); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kLessThanScalar<<<gridBlock,threadBlock>>>(mat,val,target,len); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kLessThanScalar<<<gridBlock,threadBlock>>>(mat,val,target,len); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
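/*
 * Note on the generated benchmark above (both the CUDA and hipified variants): kernel launches
 * are asynchronous, so reading steady_clock after the launch loop without a device
 * synchronization can measure little more than enqueue overhead for launches still in flight.
 * Below is an illustrative event-based timing sketch, not part of the original file; the
 * kernel, sizes, and names are stand-ins rather than kLessThanScalar's real signature.
 */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummyKernel(float* data) { data[threadIdx.x] *= 2.0f; }

int main()
{
    float* d_data = nullptr;
    cudaMalloc(&d_data, 256 * sizeof(float));
    cudaMemset(d_data, 0, 256 * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i)
        dummyKernel<<<1, 256>>>(d_data);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);            // wait until all 1000 launches have finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("total: %.3f ms, average per launch: %.3f us\n", ms, ms * 1000.0f / 1000.0f);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_data);
    return 0;
}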
99bef435a89a7a19339305f805dc987819b0334c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdbool.h> #undef SEARCH_ALL_THE_BEST #undef PACKED /**/ #undef COLLECT_LOG #define BLOCK_DIM (32) /* NOTE: broken when more than 32 */ #define N_INIT_DISTRIBUTION (BLOCK_DIM * 64) #define STACK_BUF_LEN (48 * (BLOCK_DIM/DIR_N)) /* XXX: should be defined dynamically, but hipMalloc after hipFree fails */ #define MAX_BUF_RATIO (256) #define STATE_WIDTH 5 #define STATE_N (STATE_WIDTH * STATE_WIDTH) typedef unsigned char uchar; typedef signed char Direction; #define dir_reverse(dir) ((Direction)(3 - (dir))) #define DIR_N 4 #define DIR_FIRST 0 /* this order is not Burns', but Korf's*/ #define DIR_UP 0 #define DIR_LEFT 1 #define DIR_RIGHT 2 #define DIR_DOWN 3 #define POS_X(pos) ((pos) % STATE_WIDTH) #define POS_Y(pos) ((pos) / STATE_WIDTH) typedef struct state_tag { #ifndef PACKED uchar tile[STATE_N]; uchar inv[STATE_N]; #else unsigned long long tile; #endif uchar empty; uchar depth; Direction parent_dir; uchar h[4], rh[4]; } d_State; /* PDB */ #define TABLESIZE 244140625 /* bytes in direct-access database array (25^6) */ static __device__ unsigned char *h0; /* heuristic tables for pattern databases */ static __device__ unsigned char *h1; static __device__ __constant__ const int whichpat[25] = {0,0,0,1,1,0,0,0,1,1,2,2,0,1,1,2,2,3,3,3,2,2,3,3,3}; static __device__ __constant__ const int whichrefpat[25] = {0,0,2,2,2,0,0,2,2,2,0,0,0,3,3,1,1,1,3,3,1,1,1,3,3}; #define inv (state->inv) /* the position of each tile in order, reflected about the main diagonal */ static __device__ __constant__ const int ref[] = {0,5,10,15,20,1,6,11,16,21,2,7,12,17,22,3,8,13,18,23,4,9,14,19,24}; static __device__ __constant__ const int rot90[] = {20,15,10,5,0,21,16,11,6,1,22,17,12,7,2,23,18,13,8,3,24,19,14,9,4}; static __device__ __constant__ const int rot90ref[] = {20,21,22,23,24,15,16,17,18,19,10,11,12,13,14,5,6,7,8,9,0,1,2,3,4}; static __device__ __constant__ const int rot180[] = {24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0}; static __device__ __constant__ const int rot180ref[] = {24,19,14,9,4,23,18,13,8,3,22,17,12,7,2,21,16,11,6,1,20,15,10,5,0}; static __device__ unsigned int hash0(d_State *state) { int hashval; /* index into heuristic table */ hashval = ((((inv[1]*STATE_N+inv[2])*STATE_N+inv[5])*STATE_N+inv[6])*STATE_N+inv[7])*STATE_N+inv[12]; return (h0[hashval]); /* total moves for this pattern */ } static __device__ unsigned int hashref0(d_State *state) { int hashval; /* index into heuristic table */ hashval = (((((ref[inv[5]] * STATE_N + ref[inv[10]]) * STATE_N + ref[inv[1]]) * STATE_N + ref[inv[6]]) * STATE_N + ref[inv[11]]) * STATE_N + ref[inv[12]]); return (h0[hashval]); /* total moves for this pattern */ } static __device__ unsigned int hash1(d_State *state) { int hashval; /* index into heuristic table */ hashval = ((((inv[3]*STATE_N+inv[4])*STATE_N+inv[8])*STATE_N+inv[9])*STATE_N+inv[13])*STATE_N+inv[14]; return (h1[hashval]); /* total moves for this pattern */ } static __device__ unsigned int hashref1(d_State *state) { int hashval; /* index into heuristic table */ hashval = (((((ref[inv[15]] * STATE_N + ref[inv[20]]) * STATE_N + ref[inv[16]]) * STATE_N + ref[inv[21]]) * STATE_N + ref[inv[17]]) * STATE_N + ref[inv[22]]); return (h1[hashval]); /* total moves for this pattern */ } static __device__ unsigned int hash2(d_State *state) { int hashval; /* index into heuristic table */ hashval = ((((rot180[inv[21]] * STATE_N + rot180[inv[20]]) * STATE_N + rot180[inv[16]]) * STATE_N + rot180[inv[15]]) 
* STATE_N + rot180[inv[11]]) * STATE_N + rot180[inv[10]]; return (h1[hashval]); /* total moves for this pattern */ } static __device__ unsigned int hashref2(d_State *state) { int hashval; /* index into heuristic table */ hashval = (((((rot180ref[inv[9]] * STATE_N + rot180ref[inv[4]]) * STATE_N + rot180ref[inv[8]]) * STATE_N + rot180ref[inv[3]]) * STATE_N + rot180ref[inv[7]]) * STATE_N + rot180ref[inv[2]]); return (h1[hashval]); /* total moves for this pattern */ } static __device__ unsigned int hash3(d_State *state) { int hashval; /* index into heuristic table */ hashval = ((((rot90[inv[19]] * STATE_N + rot90[inv[24]]) * STATE_N + rot90[inv[18]]) * STATE_N + rot90[inv[23]]) * STATE_N + rot90[inv[17]]) * STATE_N + rot90[inv[22]]; return (h1[hashval]); /* total moves for this pattern */ } static __device__ unsigned int hashref3(d_State *state) { int hashval; /* index into heuristic table */ hashval = (((((rot90ref[inv[23]] * STATE_N + rot90ref[inv[24]]) * STATE_N + rot90ref[inv[18]]) * STATE_N + rot90ref[inv[19]]) * STATE_N + rot90ref[inv[13]]) * STATE_N + rot90ref[inv[14]]); return (h1[hashval]); /* total moves for this pattern */ } #undef inv typedef unsigned int (*HashFunc)(d_State *state); __device__ HashFunc hash[] = {hash0, hash1, hash2, hash3}, rhash[] = {hashref0, hashref1, hashref2, hashref3}; typedef struct search_stat_tag { bool solved; int len; unsigned long long int loads; #ifdef COLLECT_LOG unsigned long long int nodes_expanded; #endif } search_stat; typedef struct input_tag { uchar tiles[STATE_N]; int init_depth; Direction parent_dir; } Input; /* state implementation */ #define state_get_h(s) ((s)->h[0] + (s)->h[1] + (s)->h[2] + (s)->h[3]) #define state_get_rh(s) ((s)->rh[0] + (s)->rh[1] + (s)->rh[2] + (s)->rh[3]) #define state_calc_h(s) (max(state_get_h(s), state_get_rh(s))) #ifndef PACKED #define state_tile_get(s, i) ((s)->tile[i]) #define state_tile_set(s, i, v) ((s)->tile[i] = (v)) #define state_inv_set(s, i, v) ((s)->inv[(i)] = (v)) #else #define STATE_TILE_BITS 4 #define STATE_TILE_MASK ((1ull << STATE_TILE_BITS) - 1) #define state_tile_ofs(i) (i << 2) #define state_tile_get(i) \ ((state->tile & (STATE_TILE_MASK << state_tile_ofs(i))) >> \ state_tile_ofs(i)) #define state_tile_set(i, val) \ do \ { \ state->tile &= ~((STATE_TILE_MASK) << state_tile_ofs(i)); \ state->tile |= ((unsigned long long) val) << state_tile_ofs(i); \ } while (0) #endif #define distance(i, j) ((i) > (j) ? 
(i) - (j) : (j) - (i)) __device__ static void state_init(d_State *state, Input *input) { state->depth = input->init_depth; state->parent_dir = input->parent_dir; for (int i = 0; i < STATE_N; ++i) { if (input->tiles[i] == 0) state->empty = i; state_tile_set(state, i, input->tiles[i]); state_inv_set(state, input->tiles[i], i); } for (int i = 0; i < 4; i++) { state->h[i] = hash[i](state); state->rh[i] = rhash[i](state); } } __device__ static inline bool state_is_goal(d_State state) { return state_get_h(&state) == 0; } __device__ static inline int state_get_f(d_State state) { return state.depth + state_calc_h(&state); } __device__ __shared__ static bool movable_table_shared[STATE_N][DIR_N]; __device__ static inline bool state_movable(d_State state, Direction dir) { return movable_table_shared[state.empty][dir]; } __device__ __constant__ const static int pos_diff_table[DIR_N] = { -STATE_WIDTH, -1, 1, +STATE_WIDTH}; __device__ static inline bool state_move(d_State *state, Direction dir, int f_limit) { int new_empty = state->empty + pos_diff_table[dir]; int opponent = state_tile_get(state, new_empty); state_tile_set(state, state->empty, opponent); state_inv_set(state, opponent, state->empty); int pat = whichpat[opponent]; state->h[pat] = hash[pat](state); if (state->depth + 1 + state_get_h(state) <= f_limit) { int rpat = whichrefpat[opponent]; HashFunc rh; if (pat == 0) rh = rpat == 0 ? rhash[0] : rhash[2]; else if (pat == 1) rh = rpat == 2 ? rhash[2] : rhash[3]; else if (pat == 2) rh = rpat == 0 ? rhash[0] : rhash[1]; else rh = rpat == 1 ? rhash[1] : rhash[3]; state->rh[rpat] = rh(state); if (state->depth + 1 + state_get_rh(state) <= f_limit) { state->empty = new_empty; state->parent_dir = dir; ++state->depth; return true; } } return false; } /* stack implementation */ typedef struct div_stack_tag { unsigned int n; d_State buf[STACK_BUF_LEN]; } d_Stack; __device__ static inline bool stack_is_empty(d_Stack *stack) { bool ret = (stack->n == 0); __syncthreads(); return ret; } __device__ static inline void stack_put(d_Stack *stack, d_State *state, bool put) { if (put) { unsigned int i = atomicInc( &stack->n, UINT_MAX); /* slow? especially in old CC environment */ stack->buf[i] = *state; } __syncthreads(); } __device__ static inline bool stack_pop(d_Stack *stack, d_State *state) { int tid = threadIdx.x; int i = (int) stack->n - 1 - (int) (tid >> 2); if (i >= 0) *state = stack->buf[i]; __syncthreads(); if (tid == 0) stack->n = stack->n >= BLOCK_DIM / DIR_N ? 
stack->n - BLOCK_DIM / DIR_N : 0; __syncthreads(); return i >= 0; } //__device__ __shared__ Direction candidate_dir_table[4][3] = {} /* * solver implementation */ __device__ static void idas_internal(d_Stack *stack, int f_limit, search_stat *stat) { d_State state; unsigned long long int loop_cnt = 0; #ifdef COLLECT_LOG unsigned long long int nodes_expanded = 0; #endif if (threadIdx.x == 0) stat->solved = false; for (;;) { if (stack_is_empty(stack)) { stat->loads = loop_cnt; #ifdef COLLECT_LOG atomicAdd(&stat->nodes_expanded, nodes_expanded); #endif break; } ++loop_cnt; bool found = stack_pop(stack, &state), put = false; if (found) { Direction dir = threadIdx.x & 3; #ifdef COLLECT_LOG nodes_expanded++; #endif /* NOTE: candidate_dir_table may be effective to avoid divergence */ if (state.parent_dir == dir_reverse(dir)) continue; if (state_movable(state, dir)) { if (state_move(&state, dir, f_limit)) { if (state_is_goal(state)) { #ifndef SEARCH_ALL_THE_BEST asm("trap;"); #else stat->loads = loop_cnt; stat->len = state.depth; stat->solved = true; #endif #ifdef COLLECT_LOG atomicAdd(&stat->nodes_expanded, nodes_expanded); #endif } else put = true; } } } stack_put(stack, &state, put); } } __global__ void idas_kernel(Input *input, search_stat *stat, int f_limit, signed char *h_diff_table, bool *movable_table, unsigned char *h0_ptr, unsigned char *h1_ptr, d_Stack *stack_for_all) { //__shared__ d_Stack stack; int tid = threadIdx.x; int bid = blockIdx.x; d_Stack *stack = &(stack_for_all[bid]); if (tid == 0) { h0 = h0_ptr; h1 = h1_ptr; stat[bid].loads = 0; } d_State state; state_init(&state, &input[bid]); if (state_get_f(state) > f_limit) return; if (tid == 0) { stack->buf[0] = state; stack->n = 1; } for (int i = tid; i < STATE_N * DIR_N; i += blockDim.x) if (i < STATE_N * DIR_N) movable_table_shared[i / DIR_N][i % DIR_N] = movable_table[i]; __syncthreads(); idas_internal(stack, f_limit, &stat[bid]); } /* host library implementation */ #include <errno.h> #include <limits.h> #include <stddef.h> #include <stdio.h> #include <stdlib.h> #ifndef UNABLE_LOG #define elog(...) fprintf(stderr, __VA_ARGS__) #else #define elog(...) ; #endif void * palloc(size_t size) { void *ptr = malloc(size); if (!ptr) elog("malloc failed\n"); return ptr; } void * repalloc(void *old_ptr, size_t new_size) { void *ptr = realloc(old_ptr, new_size); if (!ptr) elog("realloc failed\n"); return ptr; } void pfree(void *ptr) { if (!ptr) elog("empty ptr\n"); free(ptr); } #include <assert.h> #include <stdbool.h> #include <stdlib.h> #include <string.h> typedef unsigned char idx_t; /* * [0,0] [1,0] [2,0] [3,0] * [0,1] [1,1] [2,1] [3,1] * [0,2] [1,2] [2,2] [3,2] * [0,3] [1,3] [2,3] [3,3] */ /* * goal state is * [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] */ typedef struct state_tag_cpu { int depth; /* XXX: needed? 
*/ uchar pos[STATE_WIDTH][STATE_WIDTH]; idx_t i, j; /* pos of empty */ Direction parent_dir; int h_value; } * State; #define v(state, i, j) ((state)->pos[i][j]) #define ev(state) (v(state, state->i, state->j)) #define lv(state) (v(state, state->i - 1, state->j)) #define dv(state) (v(state, state->i, state->j + 1)) #define rv(state) (v(state, state->i + 1, state->j)) #define uv(state) (v(state, state->i, state->j - 1)) static uchar from_x[STATE_WIDTH * STATE_WIDTH], from_y[STATE_WIDTH * STATE_WIDTH]; static inline void fill_from_xy(State from) { for (idx_t x = 0; x < STATE_WIDTH; ++x) for (idx_t y = 0; y < STATE_WIDTH; ++y) { from_x[v(from, x, y)] = x; from_y[v(from, x, y)] = y; } } static inline int heuristic_manhattan_distance(State from) { int h_value = 0; fill_from_xy(from); for (idx_t i = 1; i < STATE_N; ++i) { h_value += distance(from_x[i], POS_X(i)); h_value += distance(from_y[i], POS_Y(i)); } return h_value; } bool state_is_goal(State state) { return state->h_value == 0; } static inline State state_alloc(void) { return (State) palloc(sizeof(struct state_tag_cpu)); } static inline void state_free(State state) { pfree(state); } State state_init(uchar v_list[STATE_WIDTH * STATE_WIDTH], int init_depth) { State state = state_alloc(); int cnt = 0; state->depth = init_depth; state->parent_dir = (Direction) -1; for (idx_t j = 0; j < STATE_WIDTH; ++j) for (idx_t i = 0; i < STATE_WIDTH; ++i) { if (v_list[cnt] == 0) { state->i = i; state->j = j; } v(state, i, j) = v_list[cnt++]; } state->h_value = heuristic_manhattan_distance(state); return state; } void state_fini(State state) { state_free(state); } State state_copy(State src) { State dst = state_alloc(); memcpy(dst, src, sizeof(*src)); return dst; } static inline bool state_left_movable(State state) { return state->i != 0; } static inline bool state_down_movable(State state) { return state->j != STATE_WIDTH - 1; } static inline bool state_right_movable(State state) { return state->i != STATE_WIDTH - 1; } static inline bool state_up_movable(State state) { return state->j != 0; } bool state_movable(State state, Direction dir) { return (dir != DIR_LEFT || state_left_movable(state)) && (dir != DIR_DOWN || state_down_movable(state)) && (dir != DIR_RIGHT || state_right_movable(state)) && (dir != DIR_UP || state_up_movable(state)); } #define h_diff(who, opponent, dir) \ (h_diff_table[((who) * STATE_N * DIR_N) + ((opponent) << 2) + (dir)]) static int h_diff_table[STATE_N * STATE_N * DIR_N]; void state_move(State state, Direction dir) { idx_t who; assert(state_movable(state, dir)); switch (dir) { case DIR_LEFT: who = ev(state) = lv(state); state->i--; break; case DIR_DOWN: who = ev(state) = dv(state); state->j++; break; case DIR_RIGHT: who = ev(state) = rv(state); state->i++; break; case DIR_UP: who = ev(state) = uv(state); state->j--; break; default: elog("unexpected direction"); assert(false); } state->h_value = state->h_value + h_diff(who, state->i + state->j * STATE_WIDTH, dir_reverse(dir)); state->parent_dir = dir; } bool state_pos_equal(State s1, State s2) { for (idx_t i = 0; i < STATE_WIDTH; ++i) for (idx_t j = 0; j < STATE_WIDTH; ++j) if (v(s1, i, j) != v(s2, i, j)) return false; return true; } size_t state_hash(State state) { /* FIXME: for A* */ size_t hash_value = 0; for (idx_t i = 0; i < STATE_WIDTH; ++i) for (idx_t j = 0; j < STATE_WIDTH; ++j) hash_value ^= (v(state, i, j) << ((i * 3 + j) << 2)); return hash_value; } int state_get_hvalue(State state) { return state->h_value; } int state_get_depth(State state) { return state->depth; } 
static void state_dump(State state) { elog("LOG(state): depth=%d, h=%d, f=%d, ", state->depth, state->h_value, state->depth + state->h_value); for (int i = 0; i < STATE_N; ++i) elog("%d%c", i == state->i + STATE_WIDTH * state->j ? 0 : state->pos[i % STATE_WIDTH][i / STATE_WIDTH], i == STATE_N - 1 ? '\n' : ','); } #include <stddef.h> #include <stdint.h> #include <string.h> #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif typedef enum { HT_SUCCESS = 0, HT_FAILED_FOUND, HT_FAILED_NOT_FOUND, } HTStatus; /* XXX: hash function for State should be surveyed */ inline static size_t hashfunc(State key) { return state_hash(key); } typedef struct ht_entry_tag *HTEntry; struct ht_entry_tag { HTEntry next; State key; int value; }; static HTEntry ht_entry_init(State key) { HTEntry entry = (HTEntry) palloc(sizeof(*entry)); entry->key = state_copy(key); entry->next = NULL; return entry; } static void ht_entry_fini(HTEntry entry) { pfree(entry); } typedef struct ht_tag { size_t n_bins; size_t n_elems; HTEntry *bin; } * HT; static bool ht_rehash_required(HT ht) { return ht->n_bins <= ht->n_elems; /* TODO: local policy is also needed */ } static size_t calc_n_bins(size_t required) { /* NOTE: n_bins is used for mask and hence it should be pow of 2, fon now */ size_t size = 1; assert(required > 0); while (required > size) size <<= 1; return size; } HT ht_init(size_t init_size_hint) { size_t n_bins = calc_n_bins(init_size_hint); HT ht = (HT) palloc(sizeof(*ht)); ht->n_bins = n_bins; ht->n_elems = 0; assert(sizeof(*ht->bin) <= SIZE_MAX / n_bins); ht->bin = (HTEntry *) palloc(sizeof(*ht->bin) * n_bins); memset(ht->bin, 0, sizeof(*ht->bin) * n_bins); return ht; } static void ht_rehash(HT ht) { HTEntry *new_bin; size_t new_size = ht->n_bins << 1; assert(ht->n_bins<SIZE_MAX>> 1); new_bin = (HTEntry *) palloc(sizeof(*new_bin) * new_size); memset(new_bin, 0, sizeof(*new_bin) * new_size); for (size_t i = 0; i < ht->n_bins; ++i) { HTEntry entry = ht->bin[i]; while (entry) { HTEntry next = entry->next; size_t idx = hashfunc(entry->key) & (new_size - 1); entry->next = new_bin[idx]; new_bin[idx] = entry; entry = next; } } pfree(ht->bin); ht->n_bins = new_size; ht->bin = new_bin; } void ht_fini(HT ht) { for (size_t i = 0; i < ht->n_bins; ++i) { HTEntry entry = ht->bin[i]; while (entry) { HTEntry next = entry->next; state_fini(entry->key); ht_entry_fini(entry); entry = next; } } pfree(ht->bin); pfree(ht); } HTStatus ht_insert(HT ht, State key, int **value) { size_t i; HTEntry entry, new_entry; if (ht_rehash_required(ht)) ht_rehash(ht); i = hashfunc(key) & (ht->n_bins - 1); entry = ht->bin[i]; while (entry) { if (state_pos_equal(key, entry->key)) { *value = &entry->value; return HT_FAILED_FOUND; } entry = entry->next; } new_entry = ht_entry_init(key); new_entry->next = ht->bin[i]; ht->bin[i] = new_entry; *value = &new_entry->value; assert(ht->n_elems < SIZE_MAX); ht->n_elems++; return HT_SUCCESS; } /* * Priority Queue implementation */ #include <assert.h> #include <stdint.h> typedef struct pq_entry_tag { State state; int f, g; } PQEntryData; typedef PQEntryData *PQEntry; /* tiebreaking is done comparing g value */ static inline bool pq_entry_higher_priority(PQEntry e1, PQEntry e2) { return e1->f < e2->f || (e1->f == e2->f && e1->g >= e2->g); } /* * NOTE: * This priority queue is implemented doubly reallocated array. * It will only extend and will not shrink, for now. 
* It may be improved by using array of layers of iteratively widened array */ typedef struct pq_tag { size_t n_elems; size_t capa; PQEntryData *array; } * PQ; static inline size_t calc_init_capa(size_t capa_hint) { size_t capa = 1; assert(capa_hint > 0); while (capa < capa_hint) capa <<= 1; return capa - 1; } PQ pq_init(size_t init_capa_hint) { PQ pq = (PQ) palloc(sizeof(*pq)); pq->n_elems = 0; pq->capa = calc_init_capa(init_capa_hint); assert(pq->capa <= SIZE_MAX / sizeof(PQEntryData)); pq->array = (PQEntryData *) palloc(sizeof(PQEntryData) * pq->capa); return pq; } void pq_fini(PQ pq) { for (size_t i = 0; i < pq->n_elems; ++i) state_fini(pq->array[i].state); pfree(pq->array); pfree(pq); } static inline bool pq_is_full(PQ pq) { assert(pq->n_elems <= pq->capa); return pq->n_elems == pq->capa; } static inline void pq_extend(PQ pq) { pq->capa = (pq->capa << 1) + 1; assert(pq->capa <= SIZE_MAX / sizeof(PQEntryData)); pq->array = (PQEntryData *) repalloc(pq->array, sizeof(PQEntryData) * pq->capa); } static inline void pq_swap_entry(PQ pq, size_t i, size_t j) { PQEntryData tmp = pq->array[i]; pq->array[i] = pq->array[j]; pq->array[j] = tmp; } static inline size_t pq_up(size_t i) { /* NOTE: By using 1-origin, it may be written more simply, i >> 1 */ return (i - 1) >> 1; } static inline size_t pq_left(size_t i) { return (i << 1) + 1; } static void heapify_up(PQ pq) { for (size_t i = pq->n_elems; i > 0;) { size_t ui = pq_up(i); assert(i > 0); if (!pq_entry_higher_priority(&pq->array[i], &pq->array[ui])) break; pq_swap_entry(pq, i, ui); i = ui; } } void pq_put(PQ pq, State state, int f, int g) { if (pq_is_full(pq)) pq_extend(pq); pq->array[pq->n_elems].state = state_copy(state); pq->array[pq->n_elems].f = f; /* this may be abundant */ pq->array[pq->n_elems].g = g; heapify_up(pq); ++pq->n_elems; } static void heapify_down(PQ pq) { size_t sentinel = pq->n_elems; for (size_t i = 0;;) { size_t ri, li = pq_left(i); if (li >= sentinel) break; ri = li + 1; if (ri >= sentinel) { if (pq_entry_higher_priority(&pq->array[li], &pq->array[i])) pq_swap_entry(pq, i, li); /* Reached the bottom */ break; } /* NOTE: If p(ri) == p(li), it may be good to go right * since the filling order is left-first */ if (pq_entry_higher_priority(&pq->array[li], &pq->array[ri])) { if (!pq_entry_higher_priority(&pq->array[li], &pq->array[i])) break; pq_swap_entry(pq, i, li); i = li; } else { if (!pq_entry_higher_priority(&pq->array[ri], &pq->array[i])) break; pq_swap_entry(pq, i, ri); i = ri; } } } State pq_pop(PQ pq) { State ret_state; if (pq->n_elems == 0) return NULL; ret_state = pq->array[0].state; --pq->n_elems; pq->array[0] = pq->array[pq->n_elems]; heapify_down(pq); return ret_state; } void pq_dump(PQ pq) { elog("%s: n_elems=%zu, capa=%zu\n", __func__, pq->n_elems, pq->capa); for (size_t i = 0, cr_required = 1; i < pq->n_elems; i++) { if (i == cr_required) { elog("\n"); cr_required = (cr_required << 1) + 1; } elog("%d,", pq->array[i].f); elog("%d ", pq->array[i].g); } elog("\n"); } #include <stdlib.h> #include <string.h> int rrand(int m) { return (int) ((double) m * (rand() / (RAND_MAX + 1.0))); } void shuffle_input(Input input[], int n_inputs) { Input tmp; size_t n = n_inputs; while (n > 1) { size_t k = rrand(n--); memcpy(&tmp, &input[n], sizeof(Input)); memcpy(&input[n], &input[k], sizeof(Input)); memcpy(&input[k], &tmp, sizeof(Input)); } } static HT closed; bool distribute_astar(State init_state, Input input[], int distr_n, int *cnt_inputs, int *min_fvalue) { int cnt = 0; State state; PQ q = pq_init(distr_n + 10); 
HTStatus ht_status; int * ht_value; bool solved = false; closed = ht_init(10000); ht_status = ht_insert(closed, init_state, &ht_value); *ht_value = 0; pq_put(q, state_copy(init_state), state_get_hvalue(init_state), 0); ++cnt; while ((state = pq_pop(q))) { --cnt; if (state_is_goal(state)) { solved = true; break; } ht_status = ht_insert(closed, state, &ht_value); if (ht_status == HT_FAILED_FOUND && *ht_value < state_get_depth(state)) { state_fini(state); continue; } else *ht_value = state_get_depth(state); for (int dir = 0; dir < DIR_N; ++dir) { if (state->parent_dir != dir_reverse(dir) && state_movable(state, (Direction) dir)) { State next_state = state_copy(state); state_move(next_state, (Direction) dir); next_state->depth++; ht_status = ht_insert(closed, next_state, &ht_value); if (ht_status == HT_FAILED_FOUND && *ht_value <= state_get_depth(next_state)) state_fini(next_state); else { ++cnt; *ht_value = state_get_depth(next_state); pq_put(q, next_state, *ht_value + state_get_hvalue(next_state), *ht_value); } } } state_fini(state); if (cnt >= distr_n) break; } *cnt_inputs = cnt; elog("LOG: init_distr, cnt=%d\n", cnt); if (!solved) { int minf = INT_MAX; for (int id = 0; id < cnt; ++id) { State state = pq_pop(q); assert(state); for (int i = 0; i < STATE_N; ++i) input[id].tiles[i] = state->pos[i % STATE_WIDTH][i / STATE_WIDTH]; input[id].tiles[state->i + (state->j * STATE_WIDTH)] = 0; input[id].init_depth = state_get_depth(state); input[id].parent_dir = state->parent_dir; if (minf > state_get_depth(state) + state_get_hvalue(state)) minf = state_get_depth(state) + state_get_hvalue(state); } assert(pq_pop(q) == NULL); // shuffle_input(input, cnt); *min_fvalue = minf; } pq_fini(q); return solved; } static int input_devide(Input input[], search_stat stat[], int i, int devide_n, int tail, int *buf_len) { int cnt = 0; int * ht_value; State state = state_init(input[i].tiles, input[i].init_depth); state->parent_dir = input[i].parent_dir; PQ pq = pq_init(devide_n); HTStatus ht_status; pq_put(pq, state, state_get_hvalue(state), 0); ++cnt; assert(devide_n > 0); while ((state = pq_pop(pq))) { --cnt; if (state_is_goal(state)) { /* It may not be optimal goal */ pq_put(pq, state, state_get_depth(state) + state_get_hvalue(state), state_get_depth(state)); ++cnt; break; } ht_status = ht_insert(closed, state, &ht_value); if (ht_status == HT_FAILED_FOUND && *ht_value < state_get_depth(state)) { state_fini(state); continue; } else *ht_value = state_get_depth(state); for (int dir = 0; dir < DIR_N; ++dir) { if (state->parent_dir != dir_reverse(dir) && state_movable(state, (Direction) dir)) { State next_state = state_copy(state); state_move(next_state, (Direction) dir); next_state->depth++; ht_status = ht_insert(closed, next_state, &ht_value); if (ht_status == HT_FAILED_FOUND && *ht_value < state_get_depth(next_state)) state_fini(next_state); else { ++cnt; *ht_value = state_get_depth(next_state); pq_put(pq, next_state, *ht_value + state_get_hvalue(next_state), *ht_value); } } } state_fini(state); if (cnt >= devide_n) break; } int new_buf_len = *buf_len; while (tail + cnt >= new_buf_len) new_buf_len <<= 1; if (new_buf_len != *buf_len) { *buf_len = new_buf_len; repalloc(input, sizeof(*input) * new_buf_len); elog("LOG: host buf resize\n"); } input[i] = input[tail - 1]; for (int id = 0; id < cnt; ++id) { int ofs = tail - 1 + id; State state = pq_pop(pq); assert(state); for (int j = 0; j < STATE_N; ++j) input[ofs].tiles[j] = state->pos[j % STATE_WIDTH][j / STATE_WIDTH]; input[ofs].tiles[state->i + (state->j * 
STATE_WIDTH)] = 0; input[ofs].init_depth = state_get_depth(state); input[ofs].parent_dir = state->parent_dir; } pq_fini(pq); return cnt - 1; } /* main */ #include <errno.h> #include <stdio.h> #include <stdlib.h> #define exit_failure(...) \ do \ { \ printf(__VA_ARGS__); \ exit(EXIT_FAILURE); \ } while (0) static int pop_int_from_str(const char *str, char **end_ptr) { long int rv = strtol(str, end_ptr, 0); errno = 0; if (errno != 0) exit_failure("%s: %s cannot be converted into long\n", __func__, str); else if (end_ptr && str == *end_ptr) exit_failure("%s: reach end of string", __func__); if (rv > INT_MAX || rv < INT_MIN) exit_failure("%s: too big number, %ld\n", __func__, rv); return (int) rv; } #define MAX_LINE_LEN 100 static void load_state_from_file(const char *fname, uchar *s) { FILE *fp; char str[MAX_LINE_LEN]; char *str_ptr = str, *end_ptr; fp = fopen(fname, "r"); if (!fp) exit_failure("%s: %s cannot be opened\n", __func__, fname); if (!fgets(str, MAX_LINE_LEN, fp)) exit_failure("%s: fgets failed\n", __func__); for (int i = 0; i < STATE_N; ++i) { s[i] = pop_int_from_str(str_ptr, &end_ptr); str_ptr = end_ptr; } fclose(fp); } #undef MAX_LINE_LEN #define CUDA_CHECK(call) \ do \ { \ const hipError_t e = call; \ if (e != hipSuccess) \ exit_failure("Error: %s:%d code:%d, reason: %s\n", __FILE__, \ __LINE__, e, hipGetErrorString(e)); \ } while (0) __host__ static void * cudaPalloc(size_t size) { void *ptr; CUDA_CHECK(hipMalloc(&ptr, size)); return ptr; } __host__ static void cudaPfree(void *ptr) { CUDA_CHECK(hipFree(ptr)); } #define h_d_t(op, i, dir) \ (h_diff_table[(op) *STATE_N * DIR_N + (i) *DIR_N + (dir)]) __host__ static void init_mdist(signed char h_diff_table[]) { for (int opponent = 0; opponent < STATE_N; ++opponent) { int goal_x = POS_X(opponent), goal_y = POS_Y(opponent); for (int i = 0; i < STATE_N; ++i) { int from_x = POS_X(i), from_y = POS_Y(i); for (uchar dir = 0; dir < DIR_N; ++dir) { if (dir == DIR_LEFT) h_d_t(opponent, i, dir) = goal_x > from_x ? -1 : 1; if (dir == DIR_RIGHT) h_d_t(opponent, i, dir) = goal_x < from_x ? -1 : 1; if (dir == DIR_UP) h_d_t(opponent, i, dir) = goal_y > from_y ? -1 : 1; if (dir == DIR_DOWN) h_d_t(opponent, i, dir) = goal_y < from_y ? 
-1 : 1; } } } } #undef h_d_t #define m_t(i, d) (movable_table[(i) *DIR_N + (d)]) __host__ static void init_movable_table(bool movable_table[]) { for (int i = 0; i < STATE_N; ++i) for (unsigned int d = 0; d < DIR_N; ++d) { if (d == DIR_RIGHT) m_t(i, d) = (POS_X(i) < STATE_WIDTH - 1); else if (d == DIR_LEFT) m_t(i, d) = (POS_X(i) > 0); else if (d == DIR_DOWN) m_t(i, d) = (POS_Y(i) < STATE_WIDTH - 1); else if (d == DIR_UP) m_t(i, d) = (POS_Y(i) > 0); } } #undef m_t static FILE *infile; /* pointer to heuristic table file */ static unsigned char h_h0[TABLESIZE]; static unsigned char h_h1[TABLESIZE]; static __host__ void readfile(unsigned char table[]) { int pos[6]; /* positions of each pattern tile */ int index; /* direct access index */ for (pos[0] = 0; pos[0] < STATE_N; pos[0]++) { for (pos[1] = 0; pos[1] < STATE_N; pos[1]++) { if (pos[1] == pos[0]) continue; for (pos[2] = 0; pos[2] < STATE_N; pos[2]++) { if (pos[2] == pos[0] || pos[2] == pos[1]) continue; for (pos[3] = 0; pos[3] < STATE_N; pos[3]++) { if (pos[3] == pos[0] || pos[3] == pos[1] || pos[3] == pos[2]) continue; for (pos[4] = 0; pos[4] < STATE_N; pos[4]++) { if (pos[4] == pos[0] || pos[4] == pos[1] || pos[4] == pos[2] || pos[4] == pos[3]) continue; for (pos[5] = 0; pos[5] < STATE_N; pos[5]++) { if (pos[5] == pos[0] || pos[5] == pos[1] || pos[5] == pos[2] || pos[5] == pos[3] || pos[5] == pos[4]) continue; index = ((((pos[0]*25+pos[1])*25+pos[2])*25+pos[3])*25+pos[4])*25+pos[5]; table[index] = getc (infile); } } } } } } } static __host__ void pdb_load(void) { infile = fopen("pattern_1_2_5_6_7_12", "rb"); /* read 6-tile pattern database */ readfile (h_h0); /* read database and expand into direct-access array */ fclose(infile); printf ("pattern 1 2 5 6 7 12 read in\n"); infile = fopen("pattern_3_4_8_9_13_14", "rb"); /* read 6-tile pattern database */ readfile (h_h1); /* read database and expand into direct-access array */ fclose(infile); printf ("pattern 3 4 8 9 13 14 read in\n"); } // static char dir_char[] = {'U', 'R', 'L', 'D'}; #define INPUT_SIZE (sizeof(Input) * buf_len) #define STAT_SIZE (sizeof(search_stat) * buf_len) #define MOVABLE_TABLE_SIZE (sizeof(bool) * STATE_N * DIR_N) #define H_DIFF_TABLE_SIZE (STATE_N * STATE_N * DIR_N) #define INIT_STACK_SIZE (sizeof(d_Stack) * 100000) int main(int argc, char *argv[]) { int n_roots; int buf_len = N_INIT_DISTRIBUTION * MAX_BUF_RATIO; Input *input = (Input *) palloc(INPUT_SIZE), *d_input = (Input *) cudaPalloc(INPUT_SIZE); search_stat *stat = (search_stat *) palloc(STAT_SIZE), *d_stat = (search_stat *) cudaPalloc(STAT_SIZE); bool *movable_table = (bool *) palloc(MOVABLE_TABLE_SIZE), *d_movable_table = (bool *) cudaPalloc(MOVABLE_TABLE_SIZE); signed char *h_diff_table = (signed char *) palloc(H_DIFF_TABLE_SIZE), *d_h_diff_table = (signed char *) cudaPalloc(H_DIFF_TABLE_SIZE); unsigned char *d_h0 = (unsigned char *) cudaPalloc(TABLESIZE); unsigned char *d_h1 = (unsigned char *) cudaPalloc(TABLESIZE); d_Stack *stack_for_all = (d_Stack *) cudaPalloc(INIT_STACK_SIZE); int min_fvalue = 0; if (argc != 2) exit_failure("usage: bin/cumain <ifname>\n"); load_state_from_file(argv[1], input[0].tiles); pdb_load(); CUDA_CHECK(hipMemcpy(d_h0, h_h0, TABLESIZE, hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(d_h1, h_h1, TABLESIZE, hipMemcpyHostToDevice)); { State init_state = state_init(input[0].tiles, 0); state_dump(init_state); if (distribute_astar(init_state, input, N_INIT_DISTRIBUTION, &n_roots, &min_fvalue)) { elog("solution is found by distributor\n"); goto solution_found; } state_fini(init_state); } 
init_mdist(h_diff_table); init_movable_table(movable_table); CUDA_CHECK(hipMemcpy(d_movable_table, movable_table, MOVABLE_TABLE_SIZE, hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(d_h_diff_table, h_diff_table, H_DIFF_TABLE_SIZE, hipMemcpyHostToDevice)); CUDA_CHECK(hipMemset(d_input, 0, INPUT_SIZE)); for (uchar f_limit = min_fvalue;; f_limit += 2) { CUDA_CHECK(hipMemset(d_stat, 0, STAT_SIZE)); CUDA_CHECK( hipMemcpy(d_input, input, INPUT_SIZE, hipMemcpyHostToDevice)); elog("f_limit=%d\n", (int) f_limit); hipLaunchKernelGGL(( idas_kernel), dim3(n_roots), dim3(BLOCK_DIM), 0, 0, d_input, d_stat, f_limit, d_h_diff_table, d_movable_table, d_h0, d_h1, stack_for_all); CUDA_CHECK( hipGetLastError()); /* asm trap is called when find solution */ CUDA_CHECK(hipMemcpy(stat, d_stat, STAT_SIZE, hipMemcpyDeviceToHost)); unsigned long long int loads_sum = 0; for (int i = 0; i < n_roots; ++i) loads_sum += stat[i].loads; #ifdef COLLECT_LOG elog("STAT: loop\n"); for (int i = 0; i < n_roots; ++i) elog("%lld, ", stat[i].loads); putchar('\n'); elog("STAT: nodes_expanded\n"); for (int i = 0; i < n_roots; ++i) elog("%lld, ", stat[i].nodes_expanded); putchar('\n'); elog("STAT: efficiency\n"); for (int i = 0; i < n_roots; ++i) if (stat[i].loads != 0) elog("%lld, ", stat[i].nodes_expanded / stat[i].loads); putchar('\n'); #endif int increased = 0; unsigned long long int loads_av = loads_sum / n_roots; int stat_cnt[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; for (int i = 0; i < n_roots; ++i) { if (stat[i].loads < loads_av) stat_cnt[0]++; else if (stat[i].loads < 2 * loads_av) stat_cnt[1]++; else if (stat[i].loads < 4 * loads_av) stat_cnt[2]++; else if (stat[i].loads < 8 * loads_av) stat_cnt[3]++; else if (stat[i].loads < 16 * loads_av) stat_cnt[4]++; else if (stat[i].loads < 32 * loads_av) stat_cnt[5]++; else if (stat[i].loads < 64 * loads_av) stat_cnt[6]++; else if (stat[i].loads < 128 * loads_av) stat_cnt[7]++; else stat_cnt[8]++; int policy = loads_av == 0 ? stat[i].loads : (stat[i].loads - 1) / loads_av + 1; int buf_len_old = buf_len; if (policy > 1 && stat[i].loads > 10) increased += input_devide(input, stat, i, policy, n_roots + increased, &buf_len); if (buf_len != buf_len_old) { elog("XXX: fix MAX_BUF_RATIO\n"); stat = (search_stat *) repalloc(stat, STAT_SIZE); cudaPfree(d_input); cudaPfree(d_stat); d_input = (Input *) cudaPalloc(INPUT_SIZE); d_stat = (search_stat *) cudaPalloc(STAT_SIZE); } } elog("STAT: loads: sum=%lld, av=%lld\n", loads_sum, loads_av); elog("STAT: distr: av=%d, 2av=%d, 4av=%d, 8av=%d, 16av=%d, 32av=%d, " "64av=%d, 128av=%d, more=%d\n", stat_cnt[0], stat_cnt[1], stat_cnt[2], stat_cnt[3], stat_cnt[4], stat_cnt[5], stat_cnt[6], stat_cnt[7], stat_cnt[8]); n_roots += increased; elog("STAT: n_roots=%d(+%d)\n", n_roots, increased); #ifdef SEARCH_ALL_THE_BEST for (int i = 0; i < n_roots; ++i) if (stat[i].solved) { elog("find all the optimal solution(s), at depth=%d\n", stat[i].len); goto solution_found; } #endif } solution_found: cudaPfree(d_input); cudaPfree(d_stat); cudaPfree(d_movable_table); cudaPfree(d_h_diff_table); cudaPfree(d_h0); cudaPfree(d_h1); CUDA_CHECK(hipDeviceReset()); pfree(input); pfree(stat); pfree(movable_table); pfree(h_diff_table); return 0; }
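/*
 * Editor's note: the sketch below is illustrative and is not part of the file
 * pair recorded here. The hipified solver above launches its kernel through
 * hipLaunchKernelGGL, while the CUDA original that follows uses the
 * <<<grid, block>>> syntax; this toy example shows the same mapping on a
 * trivial kernel. The kernel `scale` and its arguments are hypothetical and
 * exist only for this sketch.
 */
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale(float* v, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= a; /* scale one element per thread */
}

int main(void)
{
    const int n = 256;
    float* d_v = nullptr;
    hipMalloc(&d_v, n * sizeof(float));
    hipMemset(d_v, 0, n * sizeof(float));

    /* hipify-style launch: kernel, grid, block, shared-memory bytes, stream,
     * then the kernel arguments */
    hipLaunchKernelGGL(scale, dim3(1), dim3(256), 0, 0, d_v, 2.0f, n);

    /* hipcc (HIP-Clang) also accepts the CUDA-style triple-chevron launch */
    scale<<<dim3(1), dim3(256)>>>(d_v, 2.0f, n);

    hipDeviceSynchronize();
    hipFree(d_v);
    std::printf("done\n");
    return 0;
}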
99bef435a89a7a19339305f805dc987819b0334c.cu
#include <stdbool.h> #undef SEARCH_ALL_THE_BEST #undef PACKED /**/ #undef COLLECT_LOG #define BLOCK_DIM (32) /* NOTE: broken when more than 32 */ #define N_INIT_DISTRIBUTION (BLOCK_DIM * 64) #define STACK_BUF_LEN (48 * (BLOCK_DIM/DIR_N)) /* XXX: should be defined dynamically, but cudaMalloc after cudaFree fails */ #define MAX_BUF_RATIO (256) #define STATE_WIDTH 5 #define STATE_N (STATE_WIDTH * STATE_WIDTH) typedef unsigned char uchar; typedef signed char Direction; #define dir_reverse(dir) ((Direction)(3 - (dir))) #define DIR_N 4 #define DIR_FIRST 0 /* this order is not Burns', but Korf's*/ #define DIR_UP 0 #define DIR_LEFT 1 #define DIR_RIGHT 2 #define DIR_DOWN 3 #define POS_X(pos) ((pos) % STATE_WIDTH) #define POS_Y(pos) ((pos) / STATE_WIDTH) typedef struct state_tag { #ifndef PACKED uchar tile[STATE_N]; uchar inv[STATE_N]; #else unsigned long long tile; #endif uchar empty; uchar depth; Direction parent_dir; uchar h[4], rh[4]; } d_State; /* PDB */ #define TABLESIZE 244140625 /* bytes in direct-access database array (25^6) */ static __device__ unsigned char *h0; /* heuristic tables for pattern databases */ static __device__ unsigned char *h1; static __device__ __constant__ const int whichpat[25] = {0,0,0,1,1,0,0,0,1,1,2,2,0,1,1,2,2,3,3,3,2,2,3,3,3}; static __device__ __constant__ const int whichrefpat[25] = {0,0,2,2,2,0,0,2,2,2,0,0,0,3,3,1,1,1,3,3,1,1,1,3,3}; #define inv (state->inv) /* the position of each tile in order, reflected about the main diagonal */ static __device__ __constant__ const int ref[] = {0,5,10,15,20,1,6,11,16,21,2,7,12,17,22,3,8,13,18,23,4,9,14,19,24}; static __device__ __constant__ const int rot90[] = {20,15,10,5,0,21,16,11,6,1,22,17,12,7,2,23,18,13,8,3,24,19,14,9,4}; static __device__ __constant__ const int rot90ref[] = {20,21,22,23,24,15,16,17,18,19,10,11,12,13,14,5,6,7,8,9,0,1,2,3,4}; static __device__ __constant__ const int rot180[] = {24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0}; static __device__ __constant__ const int rot180ref[] = {24,19,14,9,4,23,18,13,8,3,22,17,12,7,2,21,16,11,6,1,20,15,10,5,0}; static __device__ unsigned int hash0(d_State *state) { int hashval; /* index into heuristic table */ hashval = ((((inv[1]*STATE_N+inv[2])*STATE_N+inv[5])*STATE_N+inv[6])*STATE_N+inv[7])*STATE_N+inv[12]; return (h0[hashval]); /* total moves for this pattern */ } static __device__ unsigned int hashref0(d_State *state) { int hashval; /* index into heuristic table */ hashval = (((((ref[inv[5]] * STATE_N + ref[inv[10]]) * STATE_N + ref[inv[1]]) * STATE_N + ref[inv[6]]) * STATE_N + ref[inv[11]]) * STATE_N + ref[inv[12]]); return (h0[hashval]); /* total moves for this pattern */ } static __device__ unsigned int hash1(d_State *state) { int hashval; /* index into heuristic table */ hashval = ((((inv[3]*STATE_N+inv[4])*STATE_N+inv[8])*STATE_N+inv[9])*STATE_N+inv[13])*STATE_N+inv[14]; return (h1[hashval]); /* total moves for this pattern */ } static __device__ unsigned int hashref1(d_State *state) { int hashval; /* index into heuristic table */ hashval = (((((ref[inv[15]] * STATE_N + ref[inv[20]]) * STATE_N + ref[inv[16]]) * STATE_N + ref[inv[21]]) * STATE_N + ref[inv[17]]) * STATE_N + ref[inv[22]]); return (h1[hashval]); /* total moves for this pattern */ } static __device__ unsigned int hash2(d_State *state) { int hashval; /* index into heuristic table */ hashval = ((((rot180[inv[21]] * STATE_N + rot180[inv[20]]) * STATE_N + rot180[inv[16]]) * STATE_N + rot180[inv[15]]) * STATE_N + rot180[inv[11]]) * STATE_N + rot180[inv[10]]; return (h1[hashval]); /* 
total moves for this pattern */ } static __device__ unsigned int hashref2(d_State *state) { int hashval; /* index into heuristic table */ hashval = (((((rot180ref[inv[9]] * STATE_N + rot180ref[inv[4]]) * STATE_N + rot180ref[inv[8]]) * STATE_N + rot180ref[inv[3]]) * STATE_N + rot180ref[inv[7]]) * STATE_N + rot180ref[inv[2]]); return (h1[hashval]); /* total moves for this pattern */ } static __device__ unsigned int hash3(d_State *state) { int hashval; /* index into heuristic table */ hashval = ((((rot90[inv[19]] * STATE_N + rot90[inv[24]]) * STATE_N + rot90[inv[18]]) * STATE_N + rot90[inv[23]]) * STATE_N + rot90[inv[17]]) * STATE_N + rot90[inv[22]]; return (h1[hashval]); /* total moves for this pattern */ } static __device__ unsigned int hashref3(d_State *state) { int hashval; /* index into heuristic table */ hashval = (((((rot90ref[inv[23]] * STATE_N + rot90ref[inv[24]]) * STATE_N + rot90ref[inv[18]]) * STATE_N + rot90ref[inv[19]]) * STATE_N + rot90ref[inv[13]]) * STATE_N + rot90ref[inv[14]]); return (h1[hashval]); /* total moves for this pattern */ } #undef inv typedef unsigned int (*HashFunc)(d_State *state); __device__ HashFunc hash[] = {hash0, hash1, hash2, hash3}, rhash[] = {hashref0, hashref1, hashref2, hashref3}; typedef struct search_stat_tag { bool solved; int len; unsigned long long int loads; #ifdef COLLECT_LOG unsigned long long int nodes_expanded; #endif } search_stat; typedef struct input_tag { uchar tiles[STATE_N]; int init_depth; Direction parent_dir; } Input; /* state implementation */ #define state_get_h(s) ((s)->h[0] + (s)->h[1] + (s)->h[2] + (s)->h[3]) #define state_get_rh(s) ((s)->rh[0] + (s)->rh[1] + (s)->rh[2] + (s)->rh[3]) #define state_calc_h(s) (max(state_get_h(s), state_get_rh(s))) #ifndef PACKED #define state_tile_get(s, i) ((s)->tile[i]) #define state_tile_set(s, i, v) ((s)->tile[i] = (v)) #define state_inv_set(s, i, v) ((s)->inv[(i)] = (v)) #else #define STATE_TILE_BITS 4 #define STATE_TILE_MASK ((1ull << STATE_TILE_BITS) - 1) #define state_tile_ofs(i) (i << 2) #define state_tile_get(i) \ ((state->tile & (STATE_TILE_MASK << state_tile_ofs(i))) >> \ state_tile_ofs(i)) #define state_tile_set(i, val) \ do \ { \ state->tile &= ~((STATE_TILE_MASK) << state_tile_ofs(i)); \ state->tile |= ((unsigned long long) val) << state_tile_ofs(i); \ } while (0) #endif #define distance(i, j) ((i) > (j) ? 
(i) - (j) : (j) - (i)) __device__ static void state_init(d_State *state, Input *input) { state->depth = input->init_depth; state->parent_dir = input->parent_dir; for (int i = 0; i < STATE_N; ++i) { if (input->tiles[i] == 0) state->empty = i; state_tile_set(state, i, input->tiles[i]); state_inv_set(state, input->tiles[i], i); } for (int i = 0; i < 4; i++) { state->h[i] = hash[i](state); state->rh[i] = rhash[i](state); } } __device__ static inline bool state_is_goal(d_State state) { return state_get_h(&state) == 0; } __device__ static inline int state_get_f(d_State state) { return state.depth + state_calc_h(&state); } __device__ __shared__ static bool movable_table_shared[STATE_N][DIR_N]; __device__ static inline bool state_movable(d_State state, Direction dir) { return movable_table_shared[state.empty][dir]; } __device__ __constant__ const static int pos_diff_table[DIR_N] = { -STATE_WIDTH, -1, 1, +STATE_WIDTH}; __device__ static inline bool state_move(d_State *state, Direction dir, int f_limit) { int new_empty = state->empty + pos_diff_table[dir]; int opponent = state_tile_get(state, new_empty); state_tile_set(state, state->empty, opponent); state_inv_set(state, opponent, state->empty); int pat = whichpat[opponent]; state->h[pat] = hash[pat](state); if (state->depth + 1 + state_get_h(state) <= f_limit) { int rpat = whichrefpat[opponent]; HashFunc rh; if (pat == 0) rh = rpat == 0 ? rhash[0] : rhash[2]; else if (pat == 1) rh = rpat == 2 ? rhash[2] : rhash[3]; else if (pat == 2) rh = rpat == 0 ? rhash[0] : rhash[1]; else rh = rpat == 1 ? rhash[1] : rhash[3]; state->rh[rpat] = rh(state); if (state->depth + 1 + state_get_rh(state) <= f_limit) { state->empty = new_empty; state->parent_dir = dir; ++state->depth; return true; } } return false; } /* stack implementation */ typedef struct div_stack_tag { unsigned int n; d_State buf[STACK_BUF_LEN]; } d_Stack; __device__ static inline bool stack_is_empty(d_Stack *stack) { bool ret = (stack->n == 0); __syncthreads(); return ret; } __device__ static inline void stack_put(d_Stack *stack, d_State *state, bool put) { if (put) { unsigned int i = atomicInc( &stack->n, UINT_MAX); /* slow? especially in old CC environment */ stack->buf[i] = *state; } __syncthreads(); } __device__ static inline bool stack_pop(d_Stack *stack, d_State *state) { int tid = threadIdx.x; int i = (int) stack->n - 1 - (int) (tid >> 2); if (i >= 0) *state = stack->buf[i]; __syncthreads(); if (tid == 0) stack->n = stack->n >= BLOCK_DIM / DIR_N ? 
stack->n - BLOCK_DIM / DIR_N : 0; __syncthreads(); return i >= 0; } //__device__ __shared__ Direction candidate_dir_table[4][3] = {} /* * solver implementation */ __device__ static void idas_internal(d_Stack *stack, int f_limit, search_stat *stat) { d_State state; unsigned long long int loop_cnt = 0; #ifdef COLLECT_LOG unsigned long long int nodes_expanded = 0; #endif if (threadIdx.x == 0) stat->solved = false; for (;;) { if (stack_is_empty(stack)) { stat->loads = loop_cnt; #ifdef COLLECT_LOG atomicAdd(&stat->nodes_expanded, nodes_expanded); #endif break; } ++loop_cnt; bool found = stack_pop(stack, &state), put = false; if (found) { Direction dir = threadIdx.x & 3; #ifdef COLLECT_LOG nodes_expanded++; #endif /* NOTE: candidate_dir_table may be effective to avoid divergence */ if (state.parent_dir == dir_reverse(dir)) continue; if (state_movable(state, dir)) { if (state_move(&state, dir, f_limit)) { if (state_is_goal(state)) { #ifndef SEARCH_ALL_THE_BEST asm("trap;"); #else stat->loads = loop_cnt; stat->len = state.depth; stat->solved = true; #endif #ifdef COLLECT_LOG atomicAdd(&stat->nodes_expanded, nodes_expanded); #endif } else put = true; } } } stack_put(stack, &state, put); } } __global__ void idas_kernel(Input *input, search_stat *stat, int f_limit, signed char *h_diff_table, bool *movable_table, unsigned char *h0_ptr, unsigned char *h1_ptr, d_Stack *stack_for_all) { //__shared__ d_Stack stack; int tid = threadIdx.x; int bid = blockIdx.x; d_Stack *stack = &(stack_for_all[bid]); if (tid == 0) { h0 = h0_ptr; h1 = h1_ptr; stat[bid].loads = 0; } d_State state; state_init(&state, &input[bid]); if (state_get_f(state) > f_limit) return; if (tid == 0) { stack->buf[0] = state; stack->n = 1; } for (int i = tid; i < STATE_N * DIR_N; i += blockDim.x) if (i < STATE_N * DIR_N) movable_table_shared[i / DIR_N][i % DIR_N] = movable_table[i]; __syncthreads(); idas_internal(stack, f_limit, &stat[bid]); } /* host library implementation */ #include <errno.h> #include <limits.h> #include <stddef.h> #include <stdio.h> #include <stdlib.h> #ifndef UNABLE_LOG #define elog(...) fprintf(stderr, __VA_ARGS__) #else #define elog(...) ; #endif void * palloc(size_t size) { void *ptr = malloc(size); if (!ptr) elog("malloc failed\n"); return ptr; } void * repalloc(void *old_ptr, size_t new_size) { void *ptr = realloc(old_ptr, new_size); if (!ptr) elog("realloc failed\n"); return ptr; } void pfree(void *ptr) { if (!ptr) elog("empty ptr\n"); free(ptr); } #include <assert.h> #include <stdbool.h> #include <stdlib.h> #include <string.h> typedef unsigned char idx_t; /* * [0,0] [1,0] [2,0] [3,0] * [0,1] [1,1] [2,1] [3,1] * [0,2] [1,2] [2,2] [3,2] * [0,3] [1,3] [2,3] [3,3] */ /* * goal state is * [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] */ typedef struct state_tag_cpu { int depth; /* XXX: needed? 
*/ uchar pos[STATE_WIDTH][STATE_WIDTH]; idx_t i, j; /* pos of empty */ Direction parent_dir; int h_value; } * State; #define v(state, i, j) ((state)->pos[i][j]) #define ev(state) (v(state, state->i, state->j)) #define lv(state) (v(state, state->i - 1, state->j)) #define dv(state) (v(state, state->i, state->j + 1)) #define rv(state) (v(state, state->i + 1, state->j)) #define uv(state) (v(state, state->i, state->j - 1)) static uchar from_x[STATE_WIDTH * STATE_WIDTH], from_y[STATE_WIDTH * STATE_WIDTH]; static inline void fill_from_xy(State from) { for (idx_t x = 0; x < STATE_WIDTH; ++x) for (idx_t y = 0; y < STATE_WIDTH; ++y) { from_x[v(from, x, y)] = x; from_y[v(from, x, y)] = y; } } static inline int heuristic_manhattan_distance(State from) { int h_value = 0; fill_from_xy(from); for (idx_t i = 1; i < STATE_N; ++i) { h_value += distance(from_x[i], POS_X(i)); h_value += distance(from_y[i], POS_Y(i)); } return h_value; } bool state_is_goal(State state) { return state->h_value == 0; } static inline State state_alloc(void) { return (State) palloc(sizeof(struct state_tag_cpu)); } static inline void state_free(State state) { pfree(state); } State state_init(uchar v_list[STATE_WIDTH * STATE_WIDTH], int init_depth) { State state = state_alloc(); int cnt = 0; state->depth = init_depth; state->parent_dir = (Direction) -1; for (idx_t j = 0; j < STATE_WIDTH; ++j) for (idx_t i = 0; i < STATE_WIDTH; ++i) { if (v_list[cnt] == 0) { state->i = i; state->j = j; } v(state, i, j) = v_list[cnt++]; } state->h_value = heuristic_manhattan_distance(state); return state; } void state_fini(State state) { state_free(state); } State state_copy(State src) { State dst = state_alloc(); memcpy(dst, src, sizeof(*src)); return dst; } static inline bool state_left_movable(State state) { return state->i != 0; } static inline bool state_down_movable(State state) { return state->j != STATE_WIDTH - 1; } static inline bool state_right_movable(State state) { return state->i != STATE_WIDTH - 1; } static inline bool state_up_movable(State state) { return state->j != 0; } bool state_movable(State state, Direction dir) { return (dir != DIR_LEFT || state_left_movable(state)) && (dir != DIR_DOWN || state_down_movable(state)) && (dir != DIR_RIGHT || state_right_movable(state)) && (dir != DIR_UP || state_up_movable(state)); } #define h_diff(who, opponent, dir) \ (h_diff_table[((who) * STATE_N * DIR_N) + ((opponent) << 2) + (dir)]) static int h_diff_table[STATE_N * STATE_N * DIR_N]; void state_move(State state, Direction dir) { idx_t who; assert(state_movable(state, dir)); switch (dir) { case DIR_LEFT: who = ev(state) = lv(state); state->i--; break; case DIR_DOWN: who = ev(state) = dv(state); state->j++; break; case DIR_RIGHT: who = ev(state) = rv(state); state->i++; break; case DIR_UP: who = ev(state) = uv(state); state->j--; break; default: elog("unexpected direction"); assert(false); } state->h_value = state->h_value + h_diff(who, state->i + state->j * STATE_WIDTH, dir_reverse(dir)); state->parent_dir = dir; } bool state_pos_equal(State s1, State s2) { for (idx_t i = 0; i < STATE_WIDTH; ++i) for (idx_t j = 0; j < STATE_WIDTH; ++j) if (v(s1, i, j) != v(s2, i, j)) return false; return true; } size_t state_hash(State state) { /* FIXME: for A* */ size_t hash_value = 0; for (idx_t i = 0; i < STATE_WIDTH; ++i) for (idx_t j = 0; j < STATE_WIDTH; ++j) hash_value ^= (v(state, i, j) << ((i * 3 + j) << 2)); return hash_value; } int state_get_hvalue(State state) { return state->h_value; } int state_get_depth(State state) { return state->depth; } 
static void state_dump(State state) { elog("LOG(state): depth=%d, h=%d, f=%d, ", state->depth, state->h_value, state->depth + state->h_value); for (int i = 0; i < STATE_N; ++i) elog("%d%c", i == state->i + STATE_WIDTH * state->j ? 0 : state->pos[i % STATE_WIDTH][i / STATE_WIDTH], i == STATE_N - 1 ? '\n' : ','); } #include <stddef.h> #include <stdint.h> #include <string.h> #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif typedef enum { HT_SUCCESS = 0, HT_FAILED_FOUND, HT_FAILED_NOT_FOUND, } HTStatus; /* XXX: hash function for State should be surveyed */ inline static size_t hashfunc(State key) { return state_hash(key); } typedef struct ht_entry_tag *HTEntry; struct ht_entry_tag { HTEntry next; State key; int value; }; static HTEntry ht_entry_init(State key) { HTEntry entry = (HTEntry) palloc(sizeof(*entry)); entry->key = state_copy(key); entry->next = NULL; return entry; } static void ht_entry_fini(HTEntry entry) { pfree(entry); } typedef struct ht_tag { size_t n_bins; size_t n_elems; HTEntry *bin; } * HT; static bool ht_rehash_required(HT ht) { return ht->n_bins <= ht->n_elems; /* TODO: local policy is also needed */ } static size_t calc_n_bins(size_t required) { /* NOTE: n_bins is used for mask and hence it should be pow of 2, fon now */ size_t size = 1; assert(required > 0); while (required > size) size <<= 1; return size; } HT ht_init(size_t init_size_hint) { size_t n_bins = calc_n_bins(init_size_hint); HT ht = (HT) palloc(sizeof(*ht)); ht->n_bins = n_bins; ht->n_elems = 0; assert(sizeof(*ht->bin) <= SIZE_MAX / n_bins); ht->bin = (HTEntry *) palloc(sizeof(*ht->bin) * n_bins); memset(ht->bin, 0, sizeof(*ht->bin) * n_bins); return ht; } static void ht_rehash(HT ht) { HTEntry *new_bin; size_t new_size = ht->n_bins << 1; assert(ht->n_bins<SIZE_MAX>> 1); new_bin = (HTEntry *) palloc(sizeof(*new_bin) * new_size); memset(new_bin, 0, sizeof(*new_bin) * new_size); for (size_t i = 0; i < ht->n_bins; ++i) { HTEntry entry = ht->bin[i]; while (entry) { HTEntry next = entry->next; size_t idx = hashfunc(entry->key) & (new_size - 1); entry->next = new_bin[idx]; new_bin[idx] = entry; entry = next; } } pfree(ht->bin); ht->n_bins = new_size; ht->bin = new_bin; } void ht_fini(HT ht) { for (size_t i = 0; i < ht->n_bins; ++i) { HTEntry entry = ht->bin[i]; while (entry) { HTEntry next = entry->next; state_fini(entry->key); ht_entry_fini(entry); entry = next; } } pfree(ht->bin); pfree(ht); } HTStatus ht_insert(HT ht, State key, int **value) { size_t i; HTEntry entry, new_entry; if (ht_rehash_required(ht)) ht_rehash(ht); i = hashfunc(key) & (ht->n_bins - 1); entry = ht->bin[i]; while (entry) { if (state_pos_equal(key, entry->key)) { *value = &entry->value; return HT_FAILED_FOUND; } entry = entry->next; } new_entry = ht_entry_init(key); new_entry->next = ht->bin[i]; ht->bin[i] = new_entry; *value = &new_entry->value; assert(ht->n_elems < SIZE_MAX); ht->n_elems++; return HT_SUCCESS; } /* * Priority Queue implementation */ #include <assert.h> #include <stdint.h> typedef struct pq_entry_tag { State state; int f, g; } PQEntryData; typedef PQEntryData *PQEntry; /* tiebreaking is done comparing g value */ static inline bool pq_entry_higher_priority(PQEntry e1, PQEntry e2) { return e1->f < e2->f || (e1->f == e2->f && e1->g >= e2->g); } /* * NOTE: * This priority queue is implemented doubly reallocated array. * It will only extend and will not shrink, for now. 
* It may be improved by using array of layers of iteratively widened array */ typedef struct pq_tag { size_t n_elems; size_t capa; PQEntryData *array; } * PQ; static inline size_t calc_init_capa(size_t capa_hint) { size_t capa = 1; assert(capa_hint > 0); while (capa < capa_hint) capa <<= 1; return capa - 1; } PQ pq_init(size_t init_capa_hint) { PQ pq = (PQ) palloc(sizeof(*pq)); pq->n_elems = 0; pq->capa = calc_init_capa(init_capa_hint); assert(pq->capa <= SIZE_MAX / sizeof(PQEntryData)); pq->array = (PQEntryData *) palloc(sizeof(PQEntryData) * pq->capa); return pq; } void pq_fini(PQ pq) { for (size_t i = 0; i < pq->n_elems; ++i) state_fini(pq->array[i].state); pfree(pq->array); pfree(pq); } static inline bool pq_is_full(PQ pq) { assert(pq->n_elems <= pq->capa); return pq->n_elems == pq->capa; } static inline void pq_extend(PQ pq) { pq->capa = (pq->capa << 1) + 1; assert(pq->capa <= SIZE_MAX / sizeof(PQEntryData)); pq->array = (PQEntryData *) repalloc(pq->array, sizeof(PQEntryData) * pq->capa); } static inline void pq_swap_entry(PQ pq, size_t i, size_t j) { PQEntryData tmp = pq->array[i]; pq->array[i] = pq->array[j]; pq->array[j] = tmp; } static inline size_t pq_up(size_t i) { /* NOTE: By using 1-origin, it may be written more simply, i >> 1 */ return (i - 1) >> 1; } static inline size_t pq_left(size_t i) { return (i << 1) + 1; } static void heapify_up(PQ pq) { for (size_t i = pq->n_elems; i > 0;) { size_t ui = pq_up(i); assert(i > 0); if (!pq_entry_higher_priority(&pq->array[i], &pq->array[ui])) break; pq_swap_entry(pq, i, ui); i = ui; } } void pq_put(PQ pq, State state, int f, int g) { if (pq_is_full(pq)) pq_extend(pq); pq->array[pq->n_elems].state = state_copy(state); pq->array[pq->n_elems].f = f; /* this may be abundant */ pq->array[pq->n_elems].g = g; heapify_up(pq); ++pq->n_elems; } static void heapify_down(PQ pq) { size_t sentinel = pq->n_elems; for (size_t i = 0;;) { size_t ri, li = pq_left(i); if (li >= sentinel) break; ri = li + 1; if (ri >= sentinel) { if (pq_entry_higher_priority(&pq->array[li], &pq->array[i])) pq_swap_entry(pq, i, li); /* Reached the bottom */ break; } /* NOTE: If p(ri) == p(li), it may be good to go right * since the filling order is left-first */ if (pq_entry_higher_priority(&pq->array[li], &pq->array[ri])) { if (!pq_entry_higher_priority(&pq->array[li], &pq->array[i])) break; pq_swap_entry(pq, i, li); i = li; } else { if (!pq_entry_higher_priority(&pq->array[ri], &pq->array[i])) break; pq_swap_entry(pq, i, ri); i = ri; } } } State pq_pop(PQ pq) { State ret_state; if (pq->n_elems == 0) return NULL; ret_state = pq->array[0].state; --pq->n_elems; pq->array[0] = pq->array[pq->n_elems]; heapify_down(pq); return ret_state; } void pq_dump(PQ pq) { elog("%s: n_elems=%zu, capa=%zu\n", __func__, pq->n_elems, pq->capa); for (size_t i = 0, cr_required = 1; i < pq->n_elems; i++) { if (i == cr_required) { elog("\n"); cr_required = (cr_required << 1) + 1; } elog("%d,", pq->array[i].f); elog("%d ", pq->array[i].g); } elog("\n"); } #include <stdlib.h> #include <string.h> int rrand(int m) { return (int) ((double) m * (rand() / (RAND_MAX + 1.0))); } void shuffle_input(Input input[], int n_inputs) { Input tmp; size_t n = n_inputs; while (n > 1) { size_t k = rrand(n--); memcpy(&tmp, &input[n], sizeof(Input)); memcpy(&input[n], &input[k], sizeof(Input)); memcpy(&input[k], &tmp, sizeof(Input)); } } static HT closed; bool distribute_astar(State init_state, Input input[], int distr_n, int *cnt_inputs, int *min_fvalue) { int cnt = 0; State state; PQ q = pq_init(distr_n + 10); 
HTStatus ht_status; int * ht_value; bool solved = false; closed = ht_init(10000); ht_status = ht_insert(closed, init_state, &ht_value); *ht_value = 0; pq_put(q, state_copy(init_state), state_get_hvalue(init_state), 0); ++cnt; while ((state = pq_pop(q))) { --cnt; if (state_is_goal(state)) { solved = true; break; } ht_status = ht_insert(closed, state, &ht_value); if (ht_status == HT_FAILED_FOUND && *ht_value < state_get_depth(state)) { state_fini(state); continue; } else *ht_value = state_get_depth(state); for (int dir = 0; dir < DIR_N; ++dir) { if (state->parent_dir != dir_reverse(dir) && state_movable(state, (Direction) dir)) { State next_state = state_copy(state); state_move(next_state, (Direction) dir); next_state->depth++; ht_status = ht_insert(closed, next_state, &ht_value); if (ht_status == HT_FAILED_FOUND && *ht_value <= state_get_depth(next_state)) state_fini(next_state); else { ++cnt; *ht_value = state_get_depth(next_state); pq_put(q, next_state, *ht_value + state_get_hvalue(next_state), *ht_value); } } } state_fini(state); if (cnt >= distr_n) break; } *cnt_inputs = cnt; elog("LOG: init_distr, cnt=%d\n", cnt); if (!solved) { int minf = INT_MAX; for (int id = 0; id < cnt; ++id) { State state = pq_pop(q); assert(state); for (int i = 0; i < STATE_N; ++i) input[id].tiles[i] = state->pos[i % STATE_WIDTH][i / STATE_WIDTH]; input[id].tiles[state->i + (state->j * STATE_WIDTH)] = 0; input[id].init_depth = state_get_depth(state); input[id].parent_dir = state->parent_dir; if (minf > state_get_depth(state) + state_get_hvalue(state)) minf = state_get_depth(state) + state_get_hvalue(state); } assert(pq_pop(q) == NULL); // shuffle_input(input, cnt); *min_fvalue = minf; } pq_fini(q); return solved; } static int input_devide(Input input[], search_stat stat[], int i, int devide_n, int tail, int *buf_len) { int cnt = 0; int * ht_value; State state = state_init(input[i].tiles, input[i].init_depth); state->parent_dir = input[i].parent_dir; PQ pq = pq_init(devide_n); HTStatus ht_status; pq_put(pq, state, state_get_hvalue(state), 0); ++cnt; assert(devide_n > 0); while ((state = pq_pop(pq))) { --cnt; if (state_is_goal(state)) { /* It may not be optimal goal */ pq_put(pq, state, state_get_depth(state) + state_get_hvalue(state), state_get_depth(state)); ++cnt; break; } ht_status = ht_insert(closed, state, &ht_value); if (ht_status == HT_FAILED_FOUND && *ht_value < state_get_depth(state)) { state_fini(state); continue; } else *ht_value = state_get_depth(state); for (int dir = 0; dir < DIR_N; ++dir) { if (state->parent_dir != dir_reverse(dir) && state_movable(state, (Direction) dir)) { State next_state = state_copy(state); state_move(next_state, (Direction) dir); next_state->depth++; ht_status = ht_insert(closed, next_state, &ht_value); if (ht_status == HT_FAILED_FOUND && *ht_value < state_get_depth(next_state)) state_fini(next_state); else { ++cnt; *ht_value = state_get_depth(next_state); pq_put(pq, next_state, *ht_value + state_get_hvalue(next_state), *ht_value); } } } state_fini(state); if (cnt >= devide_n) break; } int new_buf_len = *buf_len; while (tail + cnt >= new_buf_len) new_buf_len <<= 1; if (new_buf_len != *buf_len) { *buf_len = new_buf_len; repalloc(input, sizeof(*input) * new_buf_len); elog("LOG: host buf resize\n"); } input[i] = input[tail - 1]; for (int id = 0; id < cnt; ++id) { int ofs = tail - 1 + id; State state = pq_pop(pq); assert(state); for (int j = 0; j < STATE_N; ++j) input[ofs].tiles[j] = state->pos[j % STATE_WIDTH][j / STATE_WIDTH]; input[ofs].tiles[state->i + (state->j * 
STATE_WIDTH)] = 0; input[ofs].init_depth = state_get_depth(state); input[ofs].parent_dir = state->parent_dir; } pq_fini(pq); return cnt - 1; } /* main */ #include <errno.h> #include <stdio.h> #include <stdlib.h> #define exit_failure(...) \ do \ { \ printf(__VA_ARGS__); \ exit(EXIT_FAILURE); \ } while (0) static int pop_int_from_str(const char *str, char **end_ptr) { long int rv = strtol(str, end_ptr, 0); errno = 0; if (errno != 0) exit_failure("%s: %s cannot be converted into long\n", __func__, str); else if (end_ptr && str == *end_ptr) exit_failure("%s: reach end of string", __func__); if (rv > INT_MAX || rv < INT_MIN) exit_failure("%s: too big number, %ld\n", __func__, rv); return (int) rv; } #define MAX_LINE_LEN 100 static void load_state_from_file(const char *fname, uchar *s) { FILE *fp; char str[MAX_LINE_LEN]; char *str_ptr = str, *end_ptr; fp = fopen(fname, "r"); if (!fp) exit_failure("%s: %s cannot be opened\n", __func__, fname); if (!fgets(str, MAX_LINE_LEN, fp)) exit_failure("%s: fgets failed\n", __func__); for (int i = 0; i < STATE_N; ++i) { s[i] = pop_int_from_str(str_ptr, &end_ptr); str_ptr = end_ptr; } fclose(fp); } #undef MAX_LINE_LEN #define CUDA_CHECK(call) \ do \ { \ const cudaError_t e = call; \ if (e != cudaSuccess) \ exit_failure("Error: %s:%d code:%d, reason: %s\n", __FILE__, \ __LINE__, e, cudaGetErrorString(e)); \ } while (0) __host__ static void * cudaPalloc(size_t size) { void *ptr; CUDA_CHECK(cudaMalloc(&ptr, size)); return ptr; } __host__ static void cudaPfree(void *ptr) { CUDA_CHECK(cudaFree(ptr)); } #define h_d_t(op, i, dir) \ (h_diff_table[(op) *STATE_N * DIR_N + (i) *DIR_N + (dir)]) __host__ static void init_mdist(signed char h_diff_table[]) { for (int opponent = 0; opponent < STATE_N; ++opponent) { int goal_x = POS_X(opponent), goal_y = POS_Y(opponent); for (int i = 0; i < STATE_N; ++i) { int from_x = POS_X(i), from_y = POS_Y(i); for (uchar dir = 0; dir < DIR_N; ++dir) { if (dir == DIR_LEFT) h_d_t(opponent, i, dir) = goal_x > from_x ? -1 : 1; if (dir == DIR_RIGHT) h_d_t(opponent, i, dir) = goal_x < from_x ? -1 : 1; if (dir == DIR_UP) h_d_t(opponent, i, dir) = goal_y > from_y ? -1 : 1; if (dir == DIR_DOWN) h_d_t(opponent, i, dir) = goal_y < from_y ? 
-1 : 1; } } } } #undef h_d_t #define m_t(i, d) (movable_table[(i) *DIR_N + (d)]) __host__ static void init_movable_table(bool movable_table[]) { for (int i = 0; i < STATE_N; ++i) for (unsigned int d = 0; d < DIR_N; ++d) { if (d == DIR_RIGHT) m_t(i, d) = (POS_X(i) < STATE_WIDTH - 1); else if (d == DIR_LEFT) m_t(i, d) = (POS_X(i) > 0); else if (d == DIR_DOWN) m_t(i, d) = (POS_Y(i) < STATE_WIDTH - 1); else if (d == DIR_UP) m_t(i, d) = (POS_Y(i) > 0); } } #undef m_t static FILE *infile; /* pointer to heuristic table file */ static unsigned char h_h0[TABLESIZE]; static unsigned char h_h1[TABLESIZE]; static __host__ void readfile(unsigned char table[]) { int pos[6]; /* positions of each pattern tile */ int index; /* direct access index */ for (pos[0] = 0; pos[0] < STATE_N; pos[0]++) { for (pos[1] = 0; pos[1] < STATE_N; pos[1]++) { if (pos[1] == pos[0]) continue; for (pos[2] = 0; pos[2] < STATE_N; pos[2]++) { if (pos[2] == pos[0] || pos[2] == pos[1]) continue; for (pos[3] = 0; pos[3] < STATE_N; pos[3]++) { if (pos[3] == pos[0] || pos[3] == pos[1] || pos[3] == pos[2]) continue; for (pos[4] = 0; pos[4] < STATE_N; pos[4]++) { if (pos[4] == pos[0] || pos[4] == pos[1] || pos[4] == pos[2] || pos[4] == pos[3]) continue; for (pos[5] = 0; pos[5] < STATE_N; pos[5]++) { if (pos[5] == pos[0] || pos[5] == pos[1] || pos[5] == pos[2] || pos[5] == pos[3] || pos[5] == pos[4]) continue; index = ((((pos[0]*25+pos[1])*25+pos[2])*25+pos[3])*25+pos[4])*25+pos[5]; table[index] = getc (infile); } } } } } } } static __host__ void pdb_load(void) { infile = fopen("pattern_1_2_5_6_7_12", "rb"); /* read 6-tile pattern database */ readfile (h_h0); /* read database and expand into direct-access array */ fclose(infile); printf ("pattern 1 2 5 6 7 12 read in\n"); infile = fopen("pattern_3_4_8_9_13_14", "rb"); /* read 6-tile pattern database */ readfile (h_h1); /* read database and expand into direct-access array */ fclose(infile); printf ("pattern 3 4 8 9 13 14 read in\n"); } // static char dir_char[] = {'U', 'R', 'L', 'D'}; #define INPUT_SIZE (sizeof(Input) * buf_len) #define STAT_SIZE (sizeof(search_stat) * buf_len) #define MOVABLE_TABLE_SIZE (sizeof(bool) * STATE_N * DIR_N) #define H_DIFF_TABLE_SIZE (STATE_N * STATE_N * DIR_N) #define INIT_STACK_SIZE (sizeof(d_Stack) * 100000) int main(int argc, char *argv[]) { int n_roots; int buf_len = N_INIT_DISTRIBUTION * MAX_BUF_RATIO; Input *input = (Input *) palloc(INPUT_SIZE), *d_input = (Input *) cudaPalloc(INPUT_SIZE); search_stat *stat = (search_stat *) palloc(STAT_SIZE), *d_stat = (search_stat *) cudaPalloc(STAT_SIZE); bool *movable_table = (bool *) palloc(MOVABLE_TABLE_SIZE), *d_movable_table = (bool *) cudaPalloc(MOVABLE_TABLE_SIZE); signed char *h_diff_table = (signed char *) palloc(H_DIFF_TABLE_SIZE), *d_h_diff_table = (signed char *) cudaPalloc(H_DIFF_TABLE_SIZE); unsigned char *d_h0 = (unsigned char *) cudaPalloc(TABLESIZE); unsigned char *d_h1 = (unsigned char *) cudaPalloc(TABLESIZE); d_Stack *stack_for_all = (d_Stack *) cudaPalloc(INIT_STACK_SIZE); int min_fvalue = 0; if (argc != 2) exit_failure("usage: bin/cumain <ifname>\n"); load_state_from_file(argv[1], input[0].tiles); pdb_load(); CUDA_CHECK(cudaMemcpy(d_h0, h_h0, TABLESIZE, cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(d_h1, h_h1, TABLESIZE, cudaMemcpyHostToDevice)); { State init_state = state_init(input[0].tiles, 0); state_dump(init_state); if (distribute_astar(init_state, input, N_INIT_DISTRIBUTION, &n_roots, &min_fvalue)) { elog("solution is found by distributor\n"); goto solution_found; } 
state_fini(init_state); } init_mdist(h_diff_table); init_movable_table(movable_table); CUDA_CHECK(cudaMemcpy(d_movable_table, movable_table, MOVABLE_TABLE_SIZE, cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(d_h_diff_table, h_diff_table, H_DIFF_TABLE_SIZE, cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemset(d_input, 0, INPUT_SIZE)); for (uchar f_limit = min_fvalue;; f_limit += 2) { CUDA_CHECK(cudaMemset(d_stat, 0, STAT_SIZE)); CUDA_CHECK( cudaMemcpy(d_input, input, INPUT_SIZE, cudaMemcpyHostToDevice)); elog("f_limit=%d\n", (int) f_limit); idas_kernel<<<n_roots, BLOCK_DIM>>>(d_input, d_stat, f_limit, d_h_diff_table, d_movable_table, d_h0, d_h1, stack_for_all); CUDA_CHECK( cudaGetLastError()); /* asm trap is called when find solution */ CUDA_CHECK(cudaMemcpy(stat, d_stat, STAT_SIZE, cudaMemcpyDeviceToHost)); unsigned long long int loads_sum = 0; for (int i = 0; i < n_roots; ++i) loads_sum += stat[i].loads; #ifdef COLLECT_LOG elog("STAT: loop\n"); for (int i = 0; i < n_roots; ++i) elog("%lld, ", stat[i].loads); putchar('\n'); elog("STAT: nodes_expanded\n"); for (int i = 0; i < n_roots; ++i) elog("%lld, ", stat[i].nodes_expanded); putchar('\n'); elog("STAT: efficiency\n"); for (int i = 0; i < n_roots; ++i) if (stat[i].loads != 0) elog("%lld, ", stat[i].nodes_expanded / stat[i].loads); putchar('\n'); #endif int increased = 0; unsigned long long int loads_av = loads_sum / n_roots; int stat_cnt[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; for (int i = 0; i < n_roots; ++i) { if (stat[i].loads < loads_av) stat_cnt[0]++; else if (stat[i].loads < 2 * loads_av) stat_cnt[1]++; else if (stat[i].loads < 4 * loads_av) stat_cnt[2]++; else if (stat[i].loads < 8 * loads_av) stat_cnt[3]++; else if (stat[i].loads < 16 * loads_av) stat_cnt[4]++; else if (stat[i].loads < 32 * loads_av) stat_cnt[5]++; else if (stat[i].loads < 64 * loads_av) stat_cnt[6]++; else if (stat[i].loads < 128 * loads_av) stat_cnt[7]++; else stat_cnt[8]++; int policy = loads_av == 0 ? stat[i].loads : (stat[i].loads - 1) / loads_av + 1; int buf_len_old = buf_len; if (policy > 1 && stat[i].loads > 10) increased += input_devide(input, stat, i, policy, n_roots + increased, &buf_len); if (buf_len != buf_len_old) { elog("XXX: fix MAX_BUF_RATIO\n"); stat = (search_stat *) repalloc(stat, STAT_SIZE); cudaPfree(d_input); cudaPfree(d_stat); d_input = (Input *) cudaPalloc(INPUT_SIZE); d_stat = (search_stat *) cudaPalloc(STAT_SIZE); } } elog("STAT: loads: sum=%lld, av=%lld\n", loads_sum, loads_av); elog("STAT: distr: av=%d, 2av=%d, 4av=%d, 8av=%d, 16av=%d, 32av=%d, " "64av=%d, 128av=%d, more=%d\n", stat_cnt[0], stat_cnt[1], stat_cnt[2], stat_cnt[3], stat_cnt[4], stat_cnt[5], stat_cnt[6], stat_cnt[7], stat_cnt[8]); n_roots += increased; elog("STAT: n_roots=%d(+%d)\n", n_roots, increased); #ifdef SEARCH_ALL_THE_BEST for (int i = 0; i < n_roots; ++i) if (stat[i].solved) { elog("find all the optimal solution(s), at depth=%d\n", stat[i].len); goto solution_found; } #endif } solution_found: cudaPfree(d_input); cudaPfree(d_stat); cudaPfree(d_movable_table); cudaPfree(d_h_diff_table); cudaPfree(d_h0); cudaPfree(d_h1); CUDA_CHECK(cudaDeviceReset()); pfree(input); pfree(stat); pfree(movable_table); pfree(h_diff_table); return 0; }
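/*
 * Editor's note: illustrative sketch, not part of the recorded file pair.
 * Both solver versions above address their 6-tile pattern databases with a
 * mixed-radix (base-25) code over the six pattern-tile positions, which is
 * why TABLESIZE is 25^6 = 244140625; readfile() fills exactly the entries
 * whose six positions are pairwise distinct. The helper below recomputes
 * that index; the example position vector is hypothetical (it happens to be
 * the goal placement of pattern 1 2 5 6 7 12).
 */
#include <assert.h>
#include <stdio.h>

#define STATE_N 25
#define TABLESIZE 244140625 /* 25^6, as in the solvers above */

static long pdb_index(const int pos[6])
{
    long index = 0;
    for (int k = 0; k < 6; ++k) {
        assert(0 <= pos[k] && pos[k] < STATE_N);
        index = index * STATE_N + pos[k]; /* base-25 digits, most significant first */
    }
    /* equals ((((pos0*25+pos1)*25+pos2)*25+pos3)*25+pos4)*25+pos5 */
    return index;
}

int main(void)
{
    int pos[6] = {1, 2, 5, 6, 7, 12};
    long index = pdb_index(pos);
    printf("index = %ld (table size %d)\n", index, TABLESIZE);
    assert(index < TABLESIZE);
    return 0;
}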
90d7302b4b8f9ccf57557ec782417dc7cbd920a6.hip
// !!! This is a file automatically generated by hipify!!! #include "./cuda_include.h" void set_device( int id ) { int current_id; cuda_assert( hipGetDevice( &current_id ) ); if ( current_id != id ) cuda_assert( hipSetDevice( id ) ); } void cuda_allocate( void** p, unsigned long n ) { cuda_assert( hipMalloc( p, n ) ); cuda_assert( hipMemset( *p, 0, n ) ); } void cuda_deallocate( void* p ) { cuda_assert( hipFree( p ) ); } void cuda_memcopy_host_to_device( const void* src, unsigned long n, void* dst ) { cuda_assert( hipMemcpy( dst, src, n, hipMemcpyHostToDevice ) ); } void cuda_memcopy_device_to_host( const void* src, unsigned long n, void* dst ) { cuda_assert( hipMemcpy( dst, src, n, hipMemcpyDeviceToHost ) ); } double dot( double* x_begin, double* x_end, double* y_begin ) { hipblasHandle_t handle; cublas_assert( hipblasCreate(&handle) ); unsigned long n = x_end - x_begin; double ans; cublas_assert( hipblasDdot( handle, n, x_begin, 1, y_begin, 1, &ans ) ); cublas_assert( hipblasDestroy(handle) ); return ans; }
90d7302b4b8f9ccf57557ec782417dc7cbd920a6.cu
#include "./cuda_include.h" void set_device( int id ) { int current_id; cuda_assert( cudaGetDevice( &current_id ) ); if ( current_id != id ) cuda_assert( cudaSetDevice( id ) ); } void cuda_allocate( void** p, unsigned long n ) { cuda_assert( cudaMalloc( p, n ) ); cuda_assert( cudaMemset( *p, 0, n ) ); } void cuda_deallocate( void* p ) { cuda_assert( cudaFree( p ) ); } void cuda_memcopy_host_to_device( const void* src, unsigned long n, void* dst ) { cuda_assert( cudaMemcpy( dst, src, n, cudaMemcpyHostToDevice ) ); } void cuda_memcopy_device_to_host( const void* src, unsigned long n, void* dst ) { cuda_assert( cudaMemcpy( dst, src, n, cudaMemcpyDeviceToHost ) ); } double dot( double* x_begin, double* x_end, double* y_begin ) { cublasHandle_t handle; cublas_assert( cublasCreate(&handle) ); unsigned long n = x_end - x_begin; double ans; cublas_assert( cublasDdot( handle, n, x_begin, 1, y_begin, 1, &ans ) ); cublas_assert( cublasDestroy(handle) ); return ans; }
4d945adc207e8885cede45938f8b7a96743e7c38.hip
// !!! This is a file automatically generated by hipify!!! /*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2021, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/solver/lower_trs_kernels.hpp" #include <memory> #include <hip/hip_runtime.h> #include <hipsparse.h> #include <ginkgo/core/base/exception_helpers.hpp> #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/solver/lower_trs.hpp> #include "cuda/base/cusparse_bindings.hpp" #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/solver/common_trs_kernels.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The LOWER_TRS solver namespace. * * @ingroup lower_trs */ namespace lower_trs { void should_perform_transpose(std::shared_ptr<const CudaExecutor> exec, bool& do_transpose) { should_perform_transpose_kernel(exec, do_transpose); } void init_struct(std::shared_ptr<const CudaExecutor> exec, std::shared_ptr<solver::SolveStruct>& solve_struct) { init_struct_kernel(exec, solve_struct); } template <typename ValueType, typename IndexType> void generate(std::shared_ptr<const CudaExecutor> exec, const matrix::Csr<ValueType, IndexType>* matrix, solver::SolveStruct* solve_struct, const gko::size_type num_rhs) { generate_kernel<ValueType, IndexType>(exec, matrix, solve_struct, num_rhs, false); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_LOWER_TRS_GENERATE_KERNEL); template <typename ValueType, typename IndexType> void solve(std::shared_ptr<const CudaExecutor> exec, const matrix::Csr<ValueType, IndexType>* matrix, const solver::SolveStruct* solve_struct, matrix::Dense<ValueType>* trans_b, matrix::Dense<ValueType>* trans_x, const matrix::Dense<ValueType>* b, matrix::Dense<ValueType>* x) { solve_kernel<ValueType, IndexType>(exec, matrix, solve_struct, trans_b, trans_x, b, x); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_LOWER_TRS_SOLVE_KERNEL); } // namespace lower_trs } // namespace cuda } // namespace kernels } // namespace gko
4d945adc207e8885cede45938f8b7a96743e7c38.cu
/*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2021, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/solver/lower_trs_kernels.hpp" #include <memory> #include <cuda.h> #include <cusparse.h> #include <ginkgo/core/base/exception_helpers.hpp> #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/solver/lower_trs.hpp> #include "cuda/base/cusparse_bindings.hpp" #include "cuda/base/math.hpp" #include "cuda/base/types.hpp" #include "cuda/solver/common_trs_kernels.cuh" namespace gko { namespace kernels { namespace cuda { /** * @brief The LOWER_TRS solver namespace. * * @ingroup lower_trs */ namespace lower_trs { void should_perform_transpose(std::shared_ptr<const CudaExecutor> exec, bool& do_transpose) { should_perform_transpose_kernel(exec, do_transpose); } void init_struct(std::shared_ptr<const CudaExecutor> exec, std::shared_ptr<solver::SolveStruct>& solve_struct) { init_struct_kernel(exec, solve_struct); } template <typename ValueType, typename IndexType> void generate(std::shared_ptr<const CudaExecutor> exec, const matrix::Csr<ValueType, IndexType>* matrix, solver::SolveStruct* solve_struct, const gko::size_type num_rhs) { generate_kernel<ValueType, IndexType>(exec, matrix, solve_struct, num_rhs, false); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_LOWER_TRS_GENERATE_KERNEL); template <typename ValueType, typename IndexType> void solve(std::shared_ptr<const CudaExecutor> exec, const matrix::Csr<ValueType, IndexType>* matrix, const solver::SolveStruct* solve_struct, matrix::Dense<ValueType>* trans_b, matrix::Dense<ValueType>* trans_x, const matrix::Dense<ValueType>* b, matrix::Dense<ValueType>* x) { solve_kernel<ValueType, IndexType>(exec, matrix, solve_struct, trans_b, trans_x, b, x); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_LOWER_TRS_SOLVE_KERNEL); } // namespace lower_trs } // namespace cuda } // namespace kernels } // namespace gko
aca88b99f9ac52146b81ff13df0b413305ee0b3e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Matrix normalization. * Compile with "gcc matrixNorm.c" */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #include <math.h> /* Program Parameters */ #define N 9000 /* Matrix size */ /* Matrices */ volatile float A[N][N], B[N][N]; // Flattened array A & B float flattenA[N * N], flattenB[N * N]; /* Initialize A and B*/ void initialize_inputs() { int row, col; // FIXED RANDOM SEED = 22 srand(22); for (row = 0; row < N; row++) { for (col = 0; col < N; col++) { A[row][col] = (float)rand() / 32768.0; B[row][col] = 0.0; } } } // Printing array func // This will help us in both seeing inputs/results before and after normalization void print_arrays() { int row, col; if (N < 10) { printf("\nA =\n\t"); for (row = 0; row < N; row++) { for (col = 0; col < N; col++) { printf("%5.2f%s", A[row][col], (col < N-1) ? ", " : ";\n\t"); } } printf("\nB =\n\t"); for (row = 0; row < N; row++) { for (col = 0; col < N; col++) { printf("%5.2f%s", B[row][col], (col < N-1) ? ", " : ";\n\t"); } } printf("\n"); } } // Even though CUDA API has hipMemcpy2D, I found it much easier // to convert a 2D array into a 1D with mapping scheme like this void flattenArray() { for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { flattenA[i * N + j] = A[i][j]; flattenB[i * N + j] = B[i][j]; } } } // This function basically prints out a 1D array to console void checkFlatten(float targetArray[]) { if (N < 10) { printf("---- Checking ----\n"); for (int i = 0; i < (N * N); i++) { if (i % N == 0 && i != 0) { printf("\n"); } printf("%5.2f ", targetArray[i]); } printf("\n"); } } // CUDA device information void getCudaDevices(int nDevices) { for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); } } // GPU parallel matrix normalization kernel __global__ void gpuMatrixNorm(float *flattenA, float *flattenB, int arraySize) { float mu, sigma; // Index when inside GPU i.e. threadID // After flattenning, we can access a pseudo 2D array in the same way // we flatten it i.e. 
A[i][j] == flattenA[i * arraySize + idx] int idx = blockIdx.x * blockDim.x + threadIdx.x; // If clause here is to prevent faulty computation tasks // where gpu processes an index that is beyond the scope // of the vector A if (idx < arraySize) { // Mean mu = 0.0; for (int row = 0; row < arraySize; row++) { mu += flattenA[row * arraySize + idx]; } mu /= (float) arraySize; // Wait here until every mean computations have arrived // Once arrived, then continue to Standard deviation // syncthreads == barrier __syncthreads(); // Standard deviation sigma = 0.0; for (int row = 0; row < arraySize; row++) { sigma += powf((flattenA[row * arraySize + idx] - mu), 2.0); } sigma /= (float) arraySize; // Wait here until every Standard deviation computations have arrived // Once arrived, then continue to compute the final result for B // syncthreads == barrier __syncthreads(); sigma = sqrt(sigma); // Normalization calculation for (int row = 0; row < arraySize; row++) { if (sigma == 0.0) { flattenB[row * arraySize + idx] = 0.0; } else { flattenB[row * arraySize + idx] = (flattenA[row * arraySize + idx] - mu) / sigma; } } } } int main(int argc, char **argv) { // Variables for CUDA float *device_A, *device_B; int nDevices; int cudaRunTimeVersion; int cudaDriverVersion; /* Timing variables */ struct timeval startGPU, stopGPU; /* Elapsed times using gettimeofday() */ struct timezone tzdummy; unsigned long long runtimeGPU; /* Initialize A and B */ initialize_inputs(); // Sanity check after inputs initialization printf("---- Initialized inputs ----\n"); print_arrays(); // Flatten 2D array A & B flattenArray(); // Sanity check after flattening // Usually commented this out ... I only un-conmment it to validate the flattening process went ok // NOTE: This will only print out if N < 10 checkFlatten(flattenA); // After flattening, size of array flattenA will be N * N int arraySize = sizeof(float) * N * N; // Cuda device info hipGetDeviceCount(&nDevices); getCudaDevices(nDevices); hipRuntimeGetVersion(&cudaRunTimeVersion); hipDriverGetVersion(&cudaDriverVersion); // Printing out CUDA runtime & driver version to console printf("Cuda Runtime Version: %i\n", cudaRunTimeVersion); printf("Cuda Driver Version: %i\n", cudaDriverVersion); // Start Clock GPU printf("---------------------------------------------\n"); printf("Matrix size N = %d", N); printf("\nStarting clock for GPU.\n\n"); gettimeofday(&startGPU, &tzdummy); // Allocating space for GPU device hipMalloc((void**)&device_A, arraySize); hipMalloc((void**)&device_B, arraySize); // Copying array A from HOST to GPU hipMemcpy(device_A, flattenA, arraySize, hipMemcpyHostToDevice); // Launch GPU kernel gpuMatrixNorm hipLaunchKernelGGL(( gpuMatrixNorm), dim3(N), dim3(N), 0, 0, device_A, device_B, N); // Copying array B from GPU to HOST // Initially I had hipDeviceSynchronize() before copying B from device to host // However, by reading CUDA's doc further, hipMemcpy is a blocking method hipMemcpy(flattenB, device_B, arraySize, hipMemcpyDeviceToHost); /* Stop Clock */ gettimeofday(&stopGPU, &tzdummy); runtimeGPU = (unsigned long long)(stopGPU.tv_sec - startGPU.tv_sec) * 1000000 + (stopGPU.tv_usec - startGPU.tv_usec); /* Display timing results */ printf("GPU Runtime = %g ms.\n", (float)runtimeGPU/(float)1000); printf("\nStopped clock for GPU."); printf("\n---------------------------------------------\n"); printf("---- Results ----\n"); // Sanity check the result after computes by GPU and deliver back to host machine // Usually commented this out ... 
I only un-comment it to validate the computed result went ok // NOTE: This will only print out if N < 10 checkFlatten(flattenB); // Freeing memory in GPU device hipFree(device_A); hipFree(device_B); exit(0); }
aca88b99f9ac52146b81ff13df0b413305ee0b3e.cu
/* Matrix normalization. * Compile with "gcc matrixNorm.c" */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <sys/time.h> #include <math.h> /* Program Parameters */ #define N 9000 /* Matrix size */ /* Matrices */ volatile float A[N][N], B[N][N]; // Flattened array A & B float flattenA[N * N], flattenB[N * N]; /* Initialize A and B*/ void initialize_inputs() { int row, col; // FIXED RANDOM SEED = 22 srand(22); for (row = 0; row < N; row++) { for (col = 0; col < N; col++) { A[row][col] = (float)rand() / 32768.0; B[row][col] = 0.0; } } } // Printing array func // This will help us in both seeing inputs/results before and after normalization void print_arrays() { int row, col; if (N < 10) { printf("\nA =\n\t"); for (row = 0; row < N; row++) { for (col = 0; col < N; col++) { printf("%5.2f%s", A[row][col], (col < N-1) ? ", " : ";\n\t"); } } printf("\nB =\n\t"); for (row = 0; row < N; row++) { for (col = 0; col < N; col++) { printf("%5.2f%s", B[row][col], (col < N-1) ? ", " : ";\n\t"); } } printf("\n"); } } // Even though CUDA API has cudaMemcpy2D, I found it much easier // to convert a 2D array into a 1D with mapping scheme like this void flattenArray() { for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { flattenA[i * N + j] = A[i][j]; flattenB[i * N + j] = B[i][j]; } } } // This function basically prints out a 1D array to console void checkFlatten(float targetArray[]) { if (N < 10) { printf("---- Checking ----\n"); for (int i = 0; i < (N * N); i++) { if (i % N == 0 && i != 0) { printf("\n"); } printf("%5.2f ", targetArray[i]); } printf("\n"); } } // CUDA device information void getCudaDevices(int nDevices) { for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); } } // GPU parallel matrix normalization kernel __global__ void gpuMatrixNorm(float *flattenA, float *flattenB, int arraySize) { float mu, sigma; // Index when inside GPU i.e. threadID // After flattenning, we can access a pseudo 2D array in the same way // we flatten it i.e. 
A[i][j] == flattenA[i * arraySize + idx] int idx = blockIdx.x * blockDim.x + threadIdx.x; // If clause here is to prevent faulty computation tasks // where gpu processes an index that is beyond the scope // of the vector A if (idx < arraySize) { // Mean mu = 0.0; for (int row = 0; row < arraySize; row++) { mu += flattenA[row * arraySize + idx]; } mu /= (float) arraySize; // Wait here until every mean computations have arrived // Once arrived, then continue to Standard deviation // syncthreads == barrier __syncthreads(); // Standard deviation sigma = 0.0; for (int row = 0; row < arraySize; row++) { sigma += powf((flattenA[row * arraySize + idx] - mu), 2.0); } sigma /= (float) arraySize; // Wait here until every Standard deviation computations have arrived // Once arrived, then continue to compute the final result for B // syncthreads == barrier __syncthreads(); sigma = sqrt(sigma); // Normalization calculation for (int row = 0; row < arraySize; row++) { if (sigma == 0.0) { flattenB[row * arraySize + idx] = 0.0; } else { flattenB[row * arraySize + idx] = (flattenA[row * arraySize + idx] - mu) / sigma; } } } } int main(int argc, char **argv) { // Variables for CUDA float *device_A, *device_B; int nDevices; int cudaRunTimeVersion; int cudaDriverVersion; /* Timing variables */ struct timeval startGPU, stopGPU; /* Elapsed times using gettimeofday() */ struct timezone tzdummy; unsigned long long runtimeGPU; /* Initialize A and B */ initialize_inputs(); // Sanity check after inputs initialization printf("---- Initialized inputs ----\n"); print_arrays(); // Flatten 2D array A & B flattenArray(); // Sanity check after flattening // Usually commented this out ... I only un-conmment it to validate the flattening process went ok // NOTE: This will only print out if N < 10 checkFlatten(flattenA); // After flattening, size of array flattenA will be N * N int arraySize = sizeof(float) * N * N; // Cuda device info cudaGetDeviceCount(&nDevices); getCudaDevices(nDevices); cudaRuntimeGetVersion(&cudaRunTimeVersion); cudaDriverGetVersion(&cudaDriverVersion); // Printing out CUDA runtime & driver version to console printf("Cuda Runtime Version: %i\n", cudaRunTimeVersion); printf("Cuda Driver Version: %i\n", cudaDriverVersion); // Start Clock GPU printf("---------------------------------------------\n"); printf("Matrix size N = %d", N); printf("\nStarting clock for GPU.\n\n"); gettimeofday(&startGPU, &tzdummy); // Allocating space for GPU device cudaMalloc((void**)&device_A, arraySize); cudaMalloc((void**)&device_B, arraySize); // Copying array A from HOST to GPU cudaMemcpy(device_A, flattenA, arraySize, cudaMemcpyHostToDevice); // Launch GPU kernel gpuMatrixNorm gpuMatrixNorm<<<N, N>>>(device_A, device_B, N); // Copying array B from GPU to HOST // Initially I had cudaDeviceSynchronize() before copying B from device to host // However, by reading CUDA's doc further, cudaMemcpy is a blocking method cudaMemcpy(flattenB, device_B, arraySize, cudaMemcpyDeviceToHost); /* Stop Clock */ gettimeofday(&stopGPU, &tzdummy); runtimeGPU = (unsigned long long)(stopGPU.tv_sec - startGPU.tv_sec) * 1000000 + (stopGPU.tv_usec - startGPU.tv_usec); /* Display timing results */ printf("GPU Runtime = %g ms.\n", (float)runtimeGPU/(float)1000); printf("\nStopped clock for GPU."); printf("\n---------------------------------------------\n"); printf("---- Results ----\n"); // Sanity check the result after computes by GPU and deliver back to host machine // Usually commented this out ... 
I only un-comment it to validate the computed result went ok // NOTE: This will only print out if N < 10 checkFlatten(flattenB); // Freeing memory in GPU device cudaFree(device_A); cudaFree(device_B); exit(0); }
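gpuMatrixNorm assigns one thread per column and already guards with if (idx < arraySize), so the launch only needs N threads in total. The stored files launch N blocks of N threads, which exceeds the usual 1024-threads-per-block limit once N = 9000. The fragment below is a hedged sketch of an alternative sizing that would slot into main() of the CUDA version above; the block size of 256 is an arbitrary assumption, and the surrounding declarations (N, device_A, device_B) come from that file.

// Hypothetical launch sizing for gpuMatrixNorm; not a change to the stored files.
const int threadsPerBlock = 256;                               // any value <= 1024 works here
const int numBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
gpuMatrixNorm<<<numBlocks, threadsPerBlock>>>(device_A, device_B, N);
cudaError_t launchErr = cudaGetLastError();                    // surfaces invalid-configuration errors
if (launchErr != cudaSuccess)
    fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(launchErr));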
464e7f1701ff01007eecf52762fb122dc0440d98.hip
// !!! This is a file automatically generated by hipify!!! // This example demonstrates parallel floating point vector // addition with a simple __global__ function. #include <stdlib.h> #include <stdio.h> #include <iostream> #include <time.h> #include <unistd.h> #include <sys/time.h> #include <hip/hip_runtime.h> #define BLOCK_SIZE 512 // this kernel computes the vector sum c = a + b // each thread performs one pair-wise addition void vector_reduction_seq(const float *a, float *c, const size_t n){ for(int i = 0; i < n; i++){ c[0] += a[i]; } } __device__ void warp_reduce(volatile float* sD, int tid) { //unroll last warp (32 threads) sD[tid] += sD[tid + 32]; sD[tid] += sD[tid + 16]; sD[tid] += sD[tid + 8]; sD[tid] += sD[tid + 4]; sD[tid] += sD[tid + 2]; sD[tid] += sD[tid + 1]; } __global__ void vector_reduction(float *a, float *c, const size_t n){ // compute the global element index this thread should process unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; for(unsigned int s=blockDim.x/2; s > 0; s >>= 1) { //binary reduction if (tid < s) { a[i] += a[i + s]; } __syncthreads(); } if (tid == 0) atomicAdd(c, a[i]); } __global__ void vector_reduction_shared(const float* a, float* c, const size_t n) { extern __shared__ float sD[]; unsigned int tid = threadIdx.x; unsigned int blockSize = blockDim.x; unsigned int i = blockIdx.x*(blockSize*2) + tid; sD[tid] = a[i] + a[i+blockSize]; //add on first load __syncthreads(); for(unsigned int s=blockSize/2; s > 32; s >>= 1) { //binary reduction if (tid < s) { sD[tid] += sD[tid + s]; } __syncthreads(); } if (tid < 32) warp_reduce(sD, tid); //unroll last warp for block if (tid == 0) atomicAdd(c,sD[0]); //add each block value to final value } int main(void){ // create arrays of 1M elements const int num_elements = 1<<20; // compute the size of the arrays in bytes const int num_bytes = num_elements * sizeof(int); // points to host & device arrays float *device_array_a = 0; float *device_c = 0; float *host_array_a = 0; float *host_c = 0; // malloc the host arrays host_array_a = (float*)malloc(num_bytes); host_c = (float*)malloc(sizeof(float)); // hipMalloc the device arrays hipMalloc((void**)&device_array_a, num_bytes); hipMalloc((void**)&device_c, sizeof(float)); // if any memory allocation failed, report an error message if(host_array_a == 0 || host_c == 0 || device_array_a == 0 || device_c == 0){ printf("couldn't allocate memory\n"); return 1; } // initialize host_array_a & host_array_b for(int i = 0; i < num_elements; ++i){ // make array a a linear ramp host_array_a[i] = 1; } // copy arrays a & b to the device memory space hipMemcpy(device_array_a, host_array_a, num_bytes, hipMemcpyHostToDevice); const size_t num_launches = 1; double average_seq_time; struct timespec start, end; std::cout << "Timing sequential implementation..."; if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) { perror( "clock gettime" ); exit( EXIT_FAILURE ); } for(int i = 0; i < num_launches; i++){ vector_reduction_seq(host_array_a, host_c, num_elements); } if( clock_gettime( CLOCK_REALTIME, &end) == -1 ) { perror( "clock gettime" ); exit( EXIT_FAILURE ); } float serialAns = host_c[0]; //compute the time in s average_seq_time = ( end.tv_sec - start.tv_sec ) + (double)( end.tv_nsec - start.tv_nsec ) / 1e+9; //take the average average_seq_time /= num_launches; std::cout << " done." 
<< std::endl; std::cout << average_seq_time << "s" << std::endl; // compute c = a + b on the device const size_t block_size = BLOCK_SIZE; size_t grid_size = num_elements / block_size; // deal with a possible partial final block if(num_elements % block_size) ++grid_size; // time the kernel launches using CUDA events hipEvent_t launch_begin, launch_end; hipEventCreate(&launch_begin); hipEventCreate(&launch_end); float average_time_simple = 0.0; std::cout << "Timing simple implementation..."; for(int i = 0; i < num_launches; ++i){ // record a CUDA event immediately before and after the kernel launch hipEventRecord(launch_begin,0); // launch the kernel hipLaunchKernelGGL(( vector_reduction), dim3(grid_size), dim3(block_size), 0, 0, device_array_a, device_c, num_elements); hipEventRecord(launch_end,0); hipEventSynchronize(launch_end); float time = 0.0; // measure the time (ms) spent in the kernel hipEventElapsedTime(&time, launch_begin, launch_end); average_time_simple += time; } // copy the result back to the host memory space hipMemcpy(host_c, device_c, sizeof(float), hipMemcpyDeviceToHost); std::cout << serialAns << " " << host_c[0] << std::endl; if (serialAns != host_c[0]) return 0; average_time_simple /= num_launches; std::cout << " done." << std::endl; std::cout << average_time_simple << "ms" << std::endl; hipMemcpy(device_array_a, host_array_a, num_bytes, hipMemcpyHostToDevice); host_c[0] = 0; hipMemcpy(device_c, host_c, sizeof(float), hipMemcpyHostToDevice); float average_time_shared = 0.0; std::cout << "Timing shared implementation..."; for(int i = 0; i < num_launches; ++i){ // record a CUDA event immediately before and after the kernel launch hipEventRecord(launch_begin,0); // launch the kernel hipLaunchKernelGGL(( vector_reduction_shared), dim3(grid_size), dim3(block_size/2), (block_size/2)*sizeof(float), 0, device_array_a, device_c, num_elements); hipEventRecord(launch_end,0); hipEventSynchronize(launch_end); float time = 0.0; // measure the time (ms) spent in the kernel hipEventElapsedTime(&time, launch_begin, launch_end); average_time_shared += time; } // copy the result back to the host memory space hipMemcpy(host_c, device_c, sizeof(float), hipMemcpyDeviceToHost); std::cout << serialAns << " " << host_c[0] << std::endl; if (serialAns != host_c[0]) return 0; average_time_shared /= num_launches; std::cout << " done." << std::endl; std::cout << average_time_shared << "ms" << std::endl; float num_ops=num_elements; float seq_throughput = num_ops / (average_seq_time) / 1000000000.0f; float simple_throughput = num_ops / (average_time_simple / 1000.0f) / 1000000000.0f; float shared_throughput = num_ops / (average_time_shared / 1000.0f) / 1000000000.0f; std::cout << "Throughput of sequential: " << seq_throughput << " GB/s" << std::endl; std::cout << "Throughput of simple kernel: " << simple_throughput << " GB/s" << std::endl; std::cout << "Simple performance improvement: " << simple_throughput / seq_throughput << "x" << std::endl; std::cout << "Throughput of shared kernel: " << shared_throughput << " GB/s" << std::endl; std::cout << "Shared performance improvement: " << shared_throughput / seq_throughput << "x" << std::endl; std::cout << "Shared performance over simple improvement: " << shared_throughput / simple_throughput << "x" << std::endl; hipEventDestroy(launch_begin); hipEventDestroy(launch_end); // deallocate memory free(host_array_a); free(host_c); hipFree(device_array_a); hipFree(device_c); }
464e7f1701ff01007eecf52762fb122dc0440d98.cu
// This example demonstrates parallel floating point vector // addition with a simple __global__ function. #include <stdlib.h> #include <stdio.h> #include <iostream> #include <time.h> #include <unistd.h> #include <sys/time.h> #include <cuda_runtime.h> #define BLOCK_SIZE 512 // this kernel computes the vector sum c = a + b // each thread performs one pair-wise addition void vector_reduction_seq(const float *a, float *c, const size_t n){ for(int i = 0; i < n; i++){ c[0] += a[i]; } } __device__ void warp_reduce(volatile float* sD, int tid) { //unroll last warp (32 threads) sD[tid] += sD[tid + 32]; sD[tid] += sD[tid + 16]; sD[tid] += sD[tid + 8]; sD[tid] += sD[tid + 4]; sD[tid] += sD[tid + 2]; sD[tid] += sD[tid + 1]; } __global__ void vector_reduction(float *a, float *c, const size_t n){ // compute the global element index this thread should process unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; for(unsigned int s=blockDim.x/2; s > 0; s >>= 1) { //binary reduction if (tid < s) { a[i] += a[i + s]; } __syncthreads(); } if (tid == 0) atomicAdd(c, a[i]); } __global__ void vector_reduction_shared(const float* a, float* c, const size_t n) { extern __shared__ float sD[]; unsigned int tid = threadIdx.x; unsigned int blockSize = blockDim.x; unsigned int i = blockIdx.x*(blockSize*2) + tid; sD[tid] = a[i] + a[i+blockSize]; //add on first load __syncthreads(); for(unsigned int s=blockSize/2; s > 32; s >>= 1) { //binary reduction if (tid < s) { sD[tid] += sD[tid + s]; } __syncthreads(); } if (tid < 32) warp_reduce(sD, tid); //unroll last warp for block if (tid == 0) atomicAdd(c,sD[0]); //add each block value to final value } int main(void){ // create arrays of 1M elements const int num_elements = 1<<20; // compute the size of the arrays in bytes const int num_bytes = num_elements * sizeof(int); // points to host & device arrays float *device_array_a = 0; float *device_c = 0; float *host_array_a = 0; float *host_c = 0; // malloc the host arrays host_array_a = (float*)malloc(num_bytes); host_c = (float*)malloc(sizeof(float)); // cudaMalloc the device arrays cudaMalloc((void**)&device_array_a, num_bytes); cudaMalloc((void**)&device_c, sizeof(float)); // if any memory allocation failed, report an error message if(host_array_a == 0 || host_c == 0 || device_array_a == 0 || device_c == 0){ printf("couldn't allocate memory\n"); return 1; } // initialize host_array_a & host_array_b for(int i = 0; i < num_elements; ++i){ // make array a a linear ramp host_array_a[i] = 1; } // copy arrays a & b to the device memory space cudaMemcpy(device_array_a, host_array_a, num_bytes, cudaMemcpyHostToDevice); const size_t num_launches = 1; double average_seq_time; struct timespec start, end; std::cout << "Timing sequential implementation..."; if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) { perror( "clock gettime" ); exit( EXIT_FAILURE ); } for(int i = 0; i < num_launches; i++){ vector_reduction_seq(host_array_a, host_c, num_elements); } if( clock_gettime( CLOCK_REALTIME, &end) == -1 ) { perror( "clock gettime" ); exit( EXIT_FAILURE ); } float serialAns = host_c[0]; //compute the time in s average_seq_time = ( end.tv_sec - start.tv_sec ) + (double)( end.tv_nsec - start.tv_nsec ) / 1e+9; //take the average average_seq_time /= num_launches; std::cout << " done." 
<< std::endl; std::cout << average_seq_time << "s" << std::endl; // compute c = a + b on the device const size_t block_size = BLOCK_SIZE; size_t grid_size = num_elements / block_size; // deal with a possible partial final block if(num_elements % block_size) ++grid_size; // time the kernel launches using CUDA events cudaEvent_t launch_begin, launch_end; cudaEventCreate(&launch_begin); cudaEventCreate(&launch_end); float average_time_simple = 0.0; std::cout << "Timing simple implementation..."; for(int i = 0; i < num_launches; ++i){ // record a CUDA event immediately before and after the kernel launch cudaEventRecord(launch_begin,0); // launch the kernel vector_reduction<<<grid_size, block_size>>>(device_array_a, device_c, num_elements); cudaEventRecord(launch_end,0); cudaEventSynchronize(launch_end); float time = 0.0; // measure the time (ms) spent in the kernel cudaEventElapsedTime(&time, launch_begin, launch_end); average_time_simple += time; } // copy the result back to the host memory space cudaMemcpy(host_c, device_c, sizeof(float), cudaMemcpyDeviceToHost); std::cout << serialAns << " " << host_c[0] << std::endl; if (serialAns != host_c[0]) return 0; average_time_simple /= num_launches; std::cout << " done." << std::endl; std::cout << average_time_simple << "ms" << std::endl; cudaMemcpy(device_array_a, host_array_a, num_bytes, cudaMemcpyHostToDevice); host_c[0] = 0; cudaMemcpy(device_c, host_c, sizeof(float), cudaMemcpyHostToDevice); float average_time_shared = 0.0; std::cout << "Timing shared implementation..."; for(int i = 0; i < num_launches; ++i){ // record a CUDA event immediately before and after the kernel launch cudaEventRecord(launch_begin,0); // launch the kernel vector_reduction_shared<<<grid_size, block_size/2, (block_size/2)*sizeof(float)>>>(device_array_a, device_c, num_elements); cudaEventRecord(launch_end,0); cudaEventSynchronize(launch_end); float time = 0.0; // measure the time (ms) spent in the kernel cudaEventElapsedTime(&time, launch_begin, launch_end); average_time_shared += time; } // copy the result back to the host memory space cudaMemcpy(host_c, device_c, sizeof(float), cudaMemcpyDeviceToHost); std::cout << serialAns << " " << host_c[0] << std::endl; if (serialAns != host_c[0]) return 0; average_time_shared /= num_launches; std::cout << " done." << std::endl; std::cout << average_time_shared << "ms" << std::endl; float num_ops=num_elements; float seq_throughput = num_ops / (average_seq_time) / 1000000000.0f; float simple_throughput = num_ops / (average_time_simple / 1000.0f) / 1000000000.0f; float shared_throughput = num_ops / (average_time_shared / 1000.0f) / 1000000000.0f; std::cout << "Throughput of sequential: " << seq_throughput << " GB/s" << std::endl; std::cout << "Throughput of simple kernel: " << simple_throughput << " GB/s" << std::endl; std::cout << "Simple performance improvement: " << simple_throughput / seq_throughput << "x" << std::endl; std::cout << "Throughput of shared kernel: " << shared_throughput << " GB/s" << std::endl; std::cout << "Shared performance improvement: " << shared_throughput / seq_throughput << "x" << std::endl; std::cout << "Shared performance over simple improvement: " << shared_throughput / simple_throughput << "x" << std::endl; cudaEventDestroy(launch_begin); cudaEventDestroy(launch_end); // deallocate memory free(host_array_a); free(host_c); cudaFree(device_array_a); cudaFree(device_c); }
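warp_reduce in both files uses volatile shared memory for the final 32 lanes, the classic warp-synchronous idiom. The sketch below shows an equivalent last-warp reduction built on warp shuffles, which avoids the shared-memory traffic on devices of compute capability 3.0 and newer; it is offered as an alternative reading of the same step, not as the stored implementation.

// Hypothetical shuffle-based counterpart to the volatile warp_reduce above.
__device__ float warp_reduce_shfl(float val)
{
    // Each step folds the value from the lane offset positions higher into the current lane.
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val; // lane 0 ends up holding the warp-wide sum
}

In vector_reduction_shared, the tid < 32 branch would then load float v = sD[tid] + sD[tid + 32] into a register, reduce it with warp_reduce_shfl(v), and let lane 0 issue the atomicAdd, mirroring the structure already in the file.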
e29512c9af74469fbe639846608ba10b450e7a4d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--blockDim=1024 --gridDim=1 --no-inline // In CUDA providing the inline keyword should still keep a copy of // the function around (contrary to OpenCL). However, by default a // function with this keyword is not actually inlined at the optimisation // level used by GPUVerify. #define tid threadIdx.x __device__ inline void inlined(int *A, int offset) { int temp = A[tid + offset]; A[tid] += temp; } __global__ void inline_test(int *A, int offset) { inlined(A, offset); }
e29512c9af74469fbe639846608ba10b450e7a4d.cu
//pass //--blockDim=1024 --gridDim=1 --no-inline // In CUDA providing the inline keyword should still keep a copy of // the function around (contrary to OpenCL). However, by default a // function with this keyword is not actually inlined at the optimisation // level used by GPUVerify. #define tid threadIdx.x __device__ inline void inlined(int *A, int offset) { int temp = A[tid + offset]; A[tid] += temp; } __global__ void inline_test(int *A, int offset) { inlined(A, offset); }